From 88ecbf26fb1be7695e071c001459e76b51a963e9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Dominik=20Inf=C3=BChr?= <dinfuehr@chromium.org>
Date: Wed, 12 Jan 2022 10:07:33 +0100
Subject: [PATCH] [heap] Refactor MemoryAllocator
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This CL doesn't change behavior, it only refactors MemoryAllocator:

* De-templatify the class; MemoryAllocator is used on the slow path and
  doesn't really need templates for performance.
* Rename the FreeMode enumerators to clearer names.
* Move methods into the private section of the class.

Change-Id: I7894fba956dcd7aa78ad0284d0924662fef4acae
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3379812
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78580}
---
 src/heap/large-spaces.cc                 |   7 +-
 src/heap/memory-allocator.cc             | 108 +++++++--------
 src/heap/memory-allocator.h              | 159 +++++++++++------------
 src/heap/new-spaces.cc                   |  32 ++---
 src/heap/new-spaces.h                    |   2 +-
 src/heap/paged-spaces.cc                 |   8 +-
 src/heap/paged-spaces.h                  |   2 +-
 src/heap/read-only-spaces.cc             |   2 +-
 src/heap/spaces.h                        |   5 +
 test/cctest/heap/test-spaces.cc          |  14 +-
 test/unittests/heap/unmapper-unittest.cc |   6 +-
 11 files changed, 162 insertions(+), 183 deletions(-)

diff --git a/src/heap/large-spaces.cc b/src/heap/large-spaces.cc
index 7d79c5cdd4..d563047d47 100644
--- a/src/heap/large-spaces.cc
+++ b/src/heap/large-spaces.cc
@@ -107,7 +107,7 @@ void LargeObjectSpace::TearDown() {
                DeleteEvent("LargeObjectChunk",
                            reinterpret_cast<void*>(page->address())));
     memory_chunk_list_.Remove(page);
-    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
+    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
   }
 }
@@ -324,8 +324,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       }
     } else {
       RemovePage(current, size);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
-          current);
+      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
     }
     current = next_current;
   }
@@ -531,7 +530,7 @@ void NewLargeObjectSpace::FreeDeadObjects(
     if (is_dead(object)) {
       freed_pages = true;
       RemovePage(page, size);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
       if (FLAG_concurrent_marking && is_marking) {
         heap()->concurrent_marking()->ClearMemoryChunkData(page);
       }
diff --git a/src/heap/memory-allocator.cc b/src/heap/memory-allocator.cc
index d9552149c2..0a4e3aae2d 100644
--- a/src/heap/memory-allocator.cc
+++ b/src/heap/memory-allocator.cc
@@ -87,8 +87,8 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {

  private:
   void RunImpl(JobDelegate* delegate) {
-    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
-        delegate);
+    unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled,
+                                               delegate);
     if (FLAG_trace_unmapper) {
       PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
     }
@@ -110,7 +110,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
       }
     }
   } else {
-    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
+    PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled);
   }
 }
@@ -131,21 +131,20 @@ void MemoryAllocator::Unmapper::PrepareForGC() {

 void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
   CancelAndWaitForPendingTasks();
-  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+  PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
 }

 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
     JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
-  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
+  while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
     allocator_->PerformFreeMemory(chunk);
     if (delegate && delegate->ShouldYield()) return;
   }
 }

-template <MemoryAllocator::Unmapper::FreeMode mode>
 void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
-    JobDelegate* delegate) {
+    MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) {
   MemoryChunk* chunk = nullptr;
   if (FLAG_trace_unmapper) {
     PrintIsolate(
@@ -154,18 +153,18 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
         NumberOfChunks());
   }
   // Regular chunks.
-  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
+  while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
     bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
     allocator_->PerformFreeMemory(chunk);
-    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
+    if (pooled) AddMemoryChunkSafe(kPooled, chunk);
     if (delegate && delegate->ShouldYield()) return;
   }
-  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
+  if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
     // The previous loop uncommitted any pages marked as pooled and added them
-    // to the pooled list. In case of kReleasePooled we need to free them
-    // though.
-    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
-      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
+    // to the pooled list. In case of kFreePooled we need to free them as
+    // well.
+    while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
+      allocator_->FreePooledChunk(chunk);
       if (delegate && delegate->ShouldYield()) return;
     }
   }
@@ -174,7 +173,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(

 void MemoryAllocator::Unmapper::TearDown() {
   CHECK(!job_handle_ || !job_handle_->IsValid());
-  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
+  PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
   for (int i = 0; i < kNumberOfChunkQueues; i++) {
     DCHECK(chunks_[i].empty());
   }
@@ -228,8 +227,8 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
   return true;
 }

-void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
-                                 Address base, size_t size) {
+void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
+                                       Address base, size_t size) {
   CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
 }
@@ -440,7 +439,8 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
   size_ -= released_bytes;
 }

-void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
+void MemoryAllocator::UnregisterSharedBasicMemoryChunk(
+    BasicMemoryChunk* chunk) {
   VirtualMemory* reservation = chunk->reserved_memory();
   const size_t size =
       reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -448,8 +448,8 @@ void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
   size_ -= size;
 }

-void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
-                                       Executability executable) {
+void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+                                                 Executability executable) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
   VirtualMemory* reservation = chunk->reserved_memory();
   const size_t size =
@@ -469,15 +469,20 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
   chunk->SetFlag(MemoryChunk::UNREGISTERED);
 }

-void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
-  UnregisterMemory(chunk, chunk->executable());
+void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) {
+  UnregisterBasicMemoryChunk(chunk, chunk->executable());
+}
+
+void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
+  DCHECK(!page->executable());
+  UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
 }

 void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));

-  UnregisterSharedMemory(chunk);
+  UnregisterSharedBasicMemoryChunk(chunk);

   v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
   VirtualMemory* reservation = chunk->reserved_memory();
@@ -487,15 +492,15 @@ void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
     // Only read-only pages can have a non-initialized reservation object. This
     // happens when the pages are remapped to multiple locations and where the
     // reservation would therefore be invalid.
-    FreeMemory(allocator, chunk->address(),
-               RoundUp(chunk->size(), allocator->AllocatePageSize()));
+    FreeMemoryRegion(allocator, chunk->address(),
+                     RoundUp(chunk->size(), allocator->AllocatePageSize()));
   }
 }

 void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  UnregisterMemory(chunk);
+  UnregisterMemoryChunk(chunk);
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
   chunk->SetFlag(MemoryChunk::PRE_FREED);
 }
@@ -516,25 +521,18 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
   }
 }

-template <MemoryAllocator::FreeMode mode>
-void MemoryAllocator::Free(MemoryChunk* chunk) {
+void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
   switch (mode) {
-    case kFull:
+    case kImmediately:
       PreFreeMemory(chunk);
       PerformFreeMemory(chunk);
       break;
-    case kAlreadyPooled:
-      // Pooled pages cannot be touched anymore as their memory is uncommitted.
-      // Pooled pages are not-executable.
-      FreeMemory(data_page_allocator(), chunk->address(),
-                 static_cast<size_t>(MemoryChunk::kPageSize));
-      break;
-    case kPooledAndQueue:
+    case kConcurrentlyAndPool:
       DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
       DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
       chunk->SetFlag(MemoryChunk::POOLED);
       V8_FALLTHROUGH;
-    case kPreFreeAndQueue:
+    case kConcurrently:
       PreFreeMemory(chunk);
       // The chunks added to this queue will be freed by a concurrent thread.
       unmapper()->AddMemoryChunkSafe(chunk);
@@ -542,23 +540,18 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
   }
 }

-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
-    MemoryAllocator::kFull>(MemoryChunk* chunk);
+void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
+  // Pooled pages cannot be touched anymore as their memory is uncommitted.
+  // Pooled pages are not-executable.
+  FreeMemoryRegion(data_page_allocator(), chunk->address(),
+                   static_cast<size_t>(MemoryChunk::kPageSize));
+}

-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
-    MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
-    MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
-    MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
-template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
-Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
+Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
+                                    size_t size, Space* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (alloc_mode == kPooled) {
+  if (alloc_mode == kUsePool) {
     DCHECK_EQ(size, static_cast<size_t>(
                         MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                             owner->identity())));
@@ -572,16 +565,6 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
   return owner->InitializePage(chunk);
 }

-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
-        size_t size, PagedSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
-        size_t size, SemiSpace* owner, Executability executable);
-template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
-        size_t size, SemiSpace* owner, Executability executable);
-
 ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
                                                     ReadOnlySpace* owner) {
   BasicMemoryChunk* chunk =
@@ -604,8 +587,7 @@ LargePage* MemoryAllocator::AllocateLargePage(size_t size,
   return LargePage::Initialize(isolate_->heap(), chunk, executable);
 }

-template <typename SpaceType>
-MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
+MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
   MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
   if (chunk == nullptr) return nullptr;
   const int size = MemoryChunk::kPageSize;
diff --git a/src/heap/memory-allocator.h b/src/heap/memory-allocator.h
index 49b5a769cf..57ecd82416 100644
--- a/src/heap/memory-allocator.h
+++ b/src/heap/memory-allocator.h
@@ -50,9 +50,9 @@ class MemoryAllocator {

     void AddMemoryChunkSafe(MemoryChunk* chunk) {
       if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
-        AddMemoryChunkSafe<kRegular>(chunk);
+        AddMemoryChunkSafe(kRegular, chunk);
       } else {
-        AddMemoryChunkSafe<kNonRegular>(chunk);
+        AddMemoryChunkSafe(kNonRegular, chunk);
       }
     }
@@ -61,10 +61,10 @@ class MemoryAllocator {
       // (1) Try to get a chunk that was declared as pooled and already has
       // been uncommitted.
       // (2) Try to steal any memory chunk of kPageSize that would've been
-      // unmapped.
-      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
+      // uncommitted.
+      MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
       if (chunk == nullptr) {
-        chunk = GetMemoryChunkSafe<kRegular>();
+        chunk = GetMemoryChunkSafe(kRegular);
         if (chunk != nullptr) {
           // For stolen chunks we need to manually free any allocated memory.
           chunk->ReleaseAllAllocatedMemory();
@@ -90,23 +90,24 @@ class MemoryAllocator {
       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                     // can thus be used for stealing.
       kNonRegular,  // Large chunks and executable chunks.
-      kPooled,      // Pooled chunks, already uncommited and ready for reuse.
+      kPooled,      // Pooled chunks, already freed and ready for reuse.
       kNumberOfChunkQueues,
     };

     enum class FreeMode {
+      // Disables any access on pooled pages before adding them to the pool.
       kUncommitPooled,
-      kReleasePooled,
+
+      // Free pooled pages. Only used on tear down and last-resort GCs.
+      kFreePooled,
     };

-    template <ChunkQueueType type>
-    void AddMemoryChunkSafe(MemoryChunk* chunk) {
+    void AddMemoryChunkSafe(ChunkQueueType type, MemoryChunk* chunk) {
       base::MutexGuard guard(&mutex_);
       chunks_[type].push_back(chunk);
     }

-    template <ChunkQueueType type>
-    MemoryChunk* GetMemoryChunkSafe() {
+    MemoryChunk* GetMemoryChunkSafe(ChunkQueueType type) {
       base::MutexGuard guard(&mutex_);
       if (chunks_[type].empty()) return nullptr;
       MemoryChunk* chunk = chunks_[type].back();
@@ -116,8 +117,8 @@ class MemoryAllocator {

     bool MakeRoomForNewTasks();

-    template <FreeMode mode>
-    void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
+    void PerformFreeMemoryOnQueuedChunks(FreeMode mode,
+                                         JobDelegate* delegate = nullptr);

     void PerformFreeMemoryOnQueuedNonRegularChunks(
         JobDelegate* delegate = nullptr);
@@ -132,15 +133,24 @@ class MemoryAllocator {
   };

   enum AllocationMode {
+    // Regular allocation path. Does not use pool.
     kRegular,
-    kPooled,
+
+    // Uses the pool for allocation first.
+    kUsePool,
   };

   enum FreeMode {
-    kFull,
-    kAlreadyPooled,
-    kPreFreeAndQueue,
-    kPooledAndQueue,
+    // Frees page immediately on the main thread.
+    kImmediately,
+
+    // Frees page on background thread.
+    kConcurrently,
+
+    // Uncommits but does not free page on background thread. Page is added to
+    // pool. Used to avoid the munmap/mmap-cycle when we quickly reallocate
+    // pages.
+    kConcurrentlyAndPool,
   };

   V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
@@ -160,10 +170,9 @@ class MemoryAllocator {
   // Allocates a Page from the allocator. AllocationMode is used to indicate
   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
   // should be tried first.
-  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
-            typename SpaceType>
-  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
+  V8_EXPORT_PRIVATE Page* AllocatePage(
+      MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
+      Executability executable);

   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                                Executability executable);
@@ -173,9 +182,8 @@ class MemoryAllocator {
   std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
       ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);

-  template <MemoryAllocator::FreeMode mode = kFull>
-  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-  void Free(MemoryChunk* chunk);
+  V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode,
+                              MemoryChunk* chunk);
   void FreeReadOnlyPage(ReadOnlyPage* chunk);

   // Returns allocated spaces in bytes.
@@ -197,13 +205,6 @@ class MemoryAllocator {
            address >= highest_ever_allocated_;
   }

-  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
-  // to reserve_area_size of the chunk area is reserved but not committed, it
-  // could be committed later by calling MemoryChunk::CommitArea.
-  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
-      size_t reserve_area_size, size_t commit_area_size,
-      Executability executable, BaseSpace* space);
-
   // Returns a MemoryChunk in which the memory region from commit_area_size to
   // reserve_area_size of the chunk area is reserved but not committed, it
   // could be committed later by calling MemoryChunk::CommitArea.
@@ -212,12 +213,6 @@ class MemoryAllocator {
                                                Executability executable,
                                                BaseSpace* space);

-  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
-                                size_t alignment, Executability executable,
-                                void* hint, VirtualMemory* controller);
-
-  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
-
   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
   // internally memory is freed from |start_free| to the end of the reservation.
   // Additional memory beyond the page is not accounted though, so
@@ -234,23 +229,10 @@ class MemoryAllocator {
   }
 #endif  // DEBUG

-  // Commit memory region owned by given reservation object. Returns true if
-  // it succeeded and false otherwise.
-  bool CommitMemory(VirtualMemory* reservation);
-
-  // Uncommit memory region owned by given reservation object. Returns true if
-  // it succeeded and false otherwise.
-  bool UncommitMemory(VirtualMemory* reservation);
-
   // Zaps a contiguous block of memory [start..(start+size)[ with
   // a given zap value.
   void ZapBlock(Address start, size_t size, uintptr_t zap_value);

-  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
-                                                    Address start,
-                                                    size_t commit_size,
-                                                    size_t reserved_size);
-
   // Page allocator instance for allocating non-executable pages.
   // Guaranteed to be a valid pointer.
   v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
@@ -268,16 +250,37 @@ class MemoryAllocator {

   Unmapper* unmapper() { return &unmapper_; }

-  // Performs all necessary bookkeeping to free the memory, but does not free
-  // it.
-  void UnregisterMemory(MemoryChunk* chunk);
-  void UnregisterMemory(BasicMemoryChunk* chunk,
-                        Executability executable = NOT_EXECUTABLE);
-  void UnregisterSharedMemory(BasicMemoryChunk* chunk);
-
-  void RegisterReadOnlyMemory(ReadOnlyPage* page);
+  void UnregisterReadOnlyPage(ReadOnlyPage* page);

  private:
+  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
+  // to reserve_area_size of the chunk area is reserved but not committed, it
+  // could be committed later by calling MemoryChunk::CommitArea.
+  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
+      size_t reserve_area_size, size_t commit_area_size,
+      Executability executable, BaseSpace* space);
+
+  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+                                size_t alignment, Executability executable,
+                                void* hint, VirtualMemory* controller);
+
+  // Commit memory region owned by given reservation object. Returns true if
+  // it succeeded and false otherwise.
+  bool CommitMemory(VirtualMemory* reservation);
+
+  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
+                                                    Address start,
+                                                    size_t commit_size,
+                                                    size_t reserved_size);
+
+  // Disallows any access on memory region owned by given reservation object.
+  // Returns true if it succeeded and false otherwise.
+  bool UncommitMemory(VirtualMemory* reservation);
+
+  // Frees the given memory region.
+  void FreeMemoryRegion(v8::PageAllocator* page_allocator, Address addr,
+                        size_t size);
+
   // PreFreeMemory logically frees the object, i.e., it unregisters the
   // memory, logs a delete event and adds the chunk to remembered unmapped
   // pages.
@@ -289,8 +292,10 @@ class MemoryAllocator {

   // See AllocatePage for public interface. Note that currently we only
   // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
-  template <typename SpaceType>
-  MemoryChunk* AllocatePagePooled(SpaceType* owner);
+  MemoryChunk* AllocatePagePooled(Space* owner);
+
+  // Frees a pooled page. Only used on tear-down and last-resort GCs.
+  void FreePooledChunk(MemoryChunk* chunk);

   // Initializes pages in a chunk. Returns the first page address.
   // This function and GetChunkId() are provided for the mark-compact
@@ -313,6 +318,15 @@ class MemoryAllocator {
     }
   }

+  // Performs all necessary bookkeeping to free the memory, but does not free
+  // it.
+  void UnregisterMemoryChunk(MemoryChunk* chunk);
+  void UnregisterSharedBasicMemoryChunk(BasicMemoryChunk* chunk);
+  void UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
+                                  Executability executable = NOT_EXECUTABLE);
+
+  void RegisterReadOnlyMemory(ReadOnlyPage* page);
+
 #ifdef DEBUG
   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
     base::MutexGuard guard(&executable_memory_mutex_);
@@ -375,29 +389,6 @@ class MemoryAllocator {
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };

-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
-        size_t size, PagedSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
-        size_t size, SemiSpace* owner, Executability executable);
-extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
-    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
-        size_t size, SemiSpace* owner, Executability executable);
-
-extern template EXPORT_TEMPLATE_DECLARE(
-    V8_EXPORT_PRIVATE) void MemoryAllocator::
-    Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
-    V8_EXPORT_PRIVATE) void MemoryAllocator::
-    Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
-    V8_EXPORT_PRIVATE) void MemoryAllocator::
-    Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
-extern template EXPORT_TEMPLATE_DECLARE(
-    V8_EXPORT_PRIVATE) void MemoryAllocator::
-    Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
-
 }  // namespace internal
 }  // namespace v8
diff --git a/src/heap/new-spaces.cc b/src/heap/new-spaces.cc
index 6155a06f77..fbbef58120 100644
--- a/src/heap/new-spaces.cc
+++ b/src/heap/new-spaces.cc
@@ -58,8 +58,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
       // Clear new space flags to avoid this page being treated as a new
       // space page that is potentially being swept.
      current_page->ClearFlags(Page::kIsInYoungGenerationMask);
-      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
-          current_page);
+      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+                                       current_page);
       current_page = next_current;
     }
@@ -68,10 +68,10 @@ bool SemiSpace::EnsureCurrentCapacity() {
         heap()->incremental_marking()->non_atomic_marking_state();
     while (actual_pages < expected_pages) {
       actual_pages++;
-      current_page =
-          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-              MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
-              NOT_EXECUTABLE);
+      current_page = heap()->memory_allocator()->AllocatePage(
+          MemoryAllocator::kUsePool,
+          MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
+          NOT_EXECUTABLE);
       if (current_page == nullptr) return false;
       DCHECK_NOT_NULL(current_page);
       memory_chunk_list_.PushBack(current_page);
@@ -111,10 +111,9 @@ bool SemiSpace::Commit() {
     // Pages in the new spaces can be moved to the old space by the full
     // collector. Therefore, they must be initialized with the same FreeList as
     // old pages.
-    Page* new_page =
-        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
-            NOT_EXECUTABLE);
+    Page* new_page = heap()->memory_allocator()->AllocatePage(
+        MemoryAllocator::kUsePool,
+        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       DCHECK(!IsCommitted());
@@ -136,7 +135,8 @@ bool SemiSpace::Uncommit() {
   while (!memory_chunk_list_.Empty()) {
     MemoryChunk* chunk = memory_chunk_list_.front();
     memory_chunk_list_.Remove(chunk);
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
+    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+                                     chunk);
   }
   current_page_ = nullptr;
   current_capacity_ = 0;
@@ -169,10 +169,9 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
   for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
-    Page* new_page =
-        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
-            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
-            NOT_EXECUTABLE);
+    Page* new_page = heap()->memory_allocator()->AllocatePage(
+        MemoryAllocator::kUsePool,
+        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
     if (new_page == nullptr) {
       if (pages_added) RewindPages(pages_added);
       return false;
@@ -193,7 +192,8 @@ void SemiSpace::RewindPages(int num_pages) {
   while (num_pages > 0) {
     MemoryChunk* last = last_page();
     memory_chunk_list_.Remove(last);
-    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
+    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
+                                     last);
     num_pages--;
   }
 }
diff --git a/src/heap/new-spaces.h b/src/heap/new-spaces.h
index 57093c3912..ed45de3a96 100644
--- a/src/heap/new-spaces.h
+++ b/src/heap/new-spaces.h
@@ -107,7 +107,7 @@ class SemiSpace : public Space {
   void PrependPage(Page* page);
   void MovePageToTheEnd(Page* page);

-  Page* InitializePage(MemoryChunk* chunk);
+  Page* InitializePage(MemoryChunk* chunk) override;

   // Age mark accessors.
   Address age_mark() { return age_mark_; }
diff --git a/src/heap/paged-spaces.cc b/src/heap/paged-spaces.cc
index 0db2d5f989..6dd8bc2c1c 100644
--- a/src/heap/paged-spaces.cc
+++ b/src/heap/paged-spaces.cc
@@ -103,7 +103,7 @@ void PagedSpace::TearDown() {
   while (!memory_chunk_list_.Empty()) {
     MemoryChunk* chunk = memory_chunk_list_.front();
     memory_chunk_list_.Remove(chunk);
-    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
+    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
   }
   accounting_stats_.Clear();
 }
@@ -319,8 +319,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
 }

 Page* PagedSpace::AllocatePage() {
-  return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
-                                                  executable());
+  return heap()->memory_allocator()->AllocatePage(
+      MemoryAllocator::kRegular, AreaSize(), this, executable());
 }

 Page* PagedSpace::Expand() {
@@ -493,7 +493,7 @@ void PagedSpace::ReleasePage(Page* page) {

   AccountUncommitted(page->size());
   accounting_stats_.DecreaseCapacity(page->area_size());
-  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
+  heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
 }

 void PagedSpace::SetReadable() {
diff --git a/src/heap/paged-spaces.h b/src/heap/paged-spaces.h
index a331f394c0..a1ffc67dfe 100644
--- a/src/heap/paged-spaces.h
+++ b/src/heap/paged-spaces.h
@@ -216,7 +216,7 @@ class V8_EXPORT_PRIVATE PagedSpace

   void RefineAllocatedBytesAfterSweeping(Page* page);

-  Page* InitializePage(MemoryChunk* chunk);
+  Page* InitializePage(MemoryChunk* chunk) override;

   void ReleasePage(Page* page);

diff --git a/src/heap/read-only-spaces.cc b/src/heap/read-only-spaces.cc
index 3fa267d26c..fd4e790aa5 100644
--- a/src/heap/read-only-spaces.cc
+++ b/src/heap/read-only-spaces.cc
@@ -397,7 +397,7 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
   DetachFromHeap();
   for (ReadOnlyPage* p : pages_) {
     if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
-      memory_allocator->UnregisterMemory(p);
+      memory_allocator->UnregisterReadOnlyPage(p);
     }
     if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
       p->MakeHeaderRelocatable();
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 09c5572778..84ee3dbb3c 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -182,6 +182,11 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {

   heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }

+  virtual Page* InitializePage(MemoryChunk* chunk) {
+    UNREACHABLE();
+    return nullptr;
+  }
+
   FreeList* free_list() { return free_list_.get(); }

   Address FirstPageAddress() const { return first_page()->address(); }
diff --git a/test/cctest/heap/test-spaces.cc b/test/cctest/heap/test-spaces.cc
index 334c5137ac..91342a549b 100644
--- a/test/cctest/heap/test-spaces.cc
+++ b/test/cctest/heap/test-spaces.cc
@@ -144,7 +144,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
            memory_chunk->address() + memory_chunk->size());
   CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);

-  memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
+  memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
 }

 static unsigned int PseudorandomAreaSize() {
@@ -201,8 +201,8 @@ TEST(MemoryAllocator) {
   CHECK(!faked_space.first_page());
   CHECK(!faked_space.last_page());
   Page* first_page = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
-      NOT_EXECUTABLE);
+      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);

   faked_space.memory_chunk_list().PushBack(first_page);
   CHECK(first_page->next_page() == nullptr);
@@ -214,8 +214,8 @@ TEST(MemoryAllocator) {
   // Again, we should get n or n - 1 pages.
   Page* other = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
-      NOT_EXECUTABLE);
+      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
   total_pages++;
   faked_space.memory_chunk_list().PushBack(other);
   int page_count = 0;
@@ -808,8 +808,8 @@ TEST(NoMemoryForNewPage) {
   LinearAllocationArea allocation_info;
   OldSpace faked_space(heap, &allocation_info);
   Page* page = memory_allocator->AllocatePage(
-      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
-      NOT_EXECUTABLE);
+      MemoryAllocator::kRegular, faked_space.AreaSize(),
+      static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);

   CHECK_NULL(page);
 }
diff --git a/test/unittests/heap/unmapper-unittest.cc b/test/unittests/heap/unmapper-unittest.cc
index 27c7e0163b..aba0bdb964 100644
--- a/test/unittests/heap/unmapper-unittest.cc
+++ b/test/unittests/heap/unmapper-unittest.cc
@@ -312,6 +312,7 @@ bool SequentialUnmapperTest::old_flag_;
 TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
   if (FLAG_enable_third_party_heap) return;
   Page* page = allocator()->AllocatePage(
+      MemoryAllocator::kRegular,
       MemoryChunkLayout::AllocatableMemoryInDataPage(),
       static_cast<PagedSpace*>(heap()->old_space()),
       Executability::NOT_EXECUTABLE);
@@ -319,7 +320,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
   const size_t page_size = tracking_page_allocator()->AllocatePageSize();
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
-  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
+  allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
   unmapper()->FreeQueuedChunks();
@@ -341,6 +342,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
 TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
   if (FLAG_enable_third_party_heap) return;
   Page* page = allocator()->AllocatePage(
+      MemoryAllocator::kRegular,
       MemoryChunkLayout::AllocatableMemoryInDataPage(),
       static_cast<PagedSpace*>(heap()->old_space()),
       Executability::NOT_EXECUTABLE);
@@ -349,7 +351,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {

   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
-  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
+  allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
   unmapper()->TearDown();
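
The gist of the change, abstracted away from V8: a non-type template parameter becomes an ordinary runtime enum argument, so the extern-template boilerplate the diff deletes is no longer needed. The standalone sketch below illustrates only that pattern; MemoryChunk and Allocator here are simplified stand-ins, not the real V8 types.

// de_templatify_sketch.cc -- illustrative only, compiles with C++17.
#include <cstdio>

struct MemoryChunk {};

class Allocator {
 public:
  // Mirrors the renamed modes: kImmediately (was kFull), kConcurrently
  // (was kPreFreeAndQueue), kConcurrentlyAndPool (was kPooledAndQueue).
  enum FreeMode { kImmediately, kConcurrently, kConcurrentlyAndPool };

  // Before the refactor this was `template <FreeMode mode> void Free(...)`,
  // which forced one explicit instantiation per mode. Freeing pages is a
  // GC slow path, so a runtime switch costs nothing measurable.
  void Free(FreeMode mode, MemoryChunk* chunk) {
    switch (mode) {
      case kImmediately:
        std::puts("pre-free and unmap on the main thread");
        break;
      case kConcurrentlyAndPool:
        std::puts("flag as pooled before queueing");
        [[fallthrough]];  // pooled chunks take the same queueing path
      case kConcurrently:
        std::puts("queue chunk for the background unmapper");
        break;
    }
  }
};

int main() {
  Allocator allocator;
  MemoryChunk chunk;
  // Call site before: allocator.Free<Allocator::kPreFreeAndQueue>(&chunk);
  allocator.Free(Allocator::kConcurrently, &chunk);
}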