[heap] Refactor MemoryAllocator

This CL doesn't change behavior; it only refactors MemoryAllocator:

* De-templatify the class; MemoryAllocator is used on the slow path and
  doesn't need templates for performance (see the call-site sketch below).
* Rename the FreeMode and AllocationMode enum values
* Move methods into the private section of the class

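For reference, the renames in this CL are: MemoryAllocator::FreeMode
kFull -> kImmediately, kPreFreeAndQueue -> kConcurrently,
kPooledAndQueue -> kConcurrentlyAndPool (kAlreadyPooled is replaced by
the private FreePooledChunk() helper); Unmapper::FreeMode
kReleasePooled -> kFreePooled; AllocationMode kPooled -> kUsePool.

Illustrative call-site sketch, using fragments taken from the hunks below:

  // Before: free mode selected via a template argument.
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
  // After: free mode passed as an ordinary enum argument, with renamed value.
  heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);

  // AllocatePage likewise takes the allocation mode as its first parameter:
  Page* new_page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::kUsePool,
      MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
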
Change-Id: I7894fba956dcd7aa78ad0284d0924662fef4acae
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3379812
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78580}
Authored by Dominik Inführ on 2022-01-12 10:07:33 +01:00; committed by V8 LUCI CQ
11 changed files with 162 additions and 183 deletions

View File

@@ -107,7 +107,7 @@ void LargeObjectSpace::TearDown() {
DeleteEvent("LargeObjectChunk",
reinterpret_cast<void*>(page->address())));
memory_chunk_list_.Remove(page);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
}
}
@@ -324,8 +324,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
}
} else {
RemovePage(current, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
}
current = next_current;
}
@@ -531,7 +530,7 @@ void NewLargeObjectSpace::FreeDeadObjects(
if (is_dead(object)) {
freed_pages = true;
RemovePage(page, size);
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
if (FLAG_concurrent_marking && is_marking) {
heap()->concurrent_marking()->ClearMemoryChunkData(page);
}

View File

@@ -87,8 +87,8 @@ class MemoryAllocator::Unmapper::UnmapFreeMemoryJob : public JobTask {
private:
void RunImpl(JobDelegate* delegate) {
unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>(
delegate);
unmapper_->PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled,
delegate);
if (FLAG_trace_unmapper) {
PrintIsolate(unmapper_->heap_->isolate(), "UnmapFreeMemoryTask Done\n");
}
@@ -110,7 +110,7 @@ void MemoryAllocator::Unmapper::FreeQueuedChunks() {
}
}
} else {
PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
PerformFreeMemoryOnQueuedChunks(FreeMode::kUncommitPooled);
}
}
@@ -131,21 +131,20 @@ void MemoryAllocator::Unmapper::PrepareForGC() {
void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
CancelAndWaitForPendingTasks();
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
}
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
allocator_->PerformFreeMemory(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
JobDelegate* delegate) {
MemoryAllocator::Unmapper::FreeMode mode, JobDelegate* delegate) {
MemoryChunk* chunk = nullptr;
if (FLAG_trace_unmapper) {
PrintIsolate(
@@ -154,18 +153,18 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
NumberOfChunks());
}
// Regular chunks.
while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
allocator_->PerformFreeMemory(chunk);
if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
if (pooled) AddMemoryChunkSafe(kPooled, chunk);
if (delegate && delegate->ShouldYield()) return;
}
if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
// The previous loop uncommitted any pages marked as pooled and added them
// to the pooled list. In case of kReleasePooled we need to free them
// though.
while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
// to the pooled list. In case of kFreePooled we need to free them though as
// well.
while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
allocator_->FreePooledChunk(chunk);
if (delegate && delegate->ShouldYield()) return;
}
}
@@ -174,7 +173,7 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
void MemoryAllocator::Unmapper::TearDown() {
CHECK(!job_handle_ || !job_handle_->IsValid());
PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
for (int i = 0; i < kNumberOfChunkQueues; i++) {
DCHECK(chunks_[i].empty());
}
@@ -228,8 +227,8 @@ bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
return true;
}
void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
Address base, size_t size) {
void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
Address base, size_t size) {
CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}
@@ -440,7 +439,8 @@ void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
size_ -= released_bytes;
}
void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
void MemoryAllocator::UnregisterSharedBasicMemoryChunk(
BasicMemoryChunk* chunk) {
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
@@ -448,8 +448,8 @@ void MemoryAllocator::UnregisterSharedMemory(BasicMemoryChunk* chunk) {
size_ -= size;
}
void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable) {
void MemoryAllocator::UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
Executability executable) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
@@ -469,15 +469,20 @@ void MemoryAllocator::UnregisterMemory(BasicMemoryChunk* chunk,
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
UnregisterMemory(chunk, chunk->executable());
void MemoryAllocator::UnregisterMemoryChunk(MemoryChunk* chunk) {
UnregisterBasicMemoryChunk(chunk, chunk->executable());
}
void MemoryAllocator::UnregisterReadOnlyPage(ReadOnlyPage* page) {
DCHECK(!page->executable());
UnregisterBasicMemoryChunk(page, NOT_EXECUTABLE);
}
void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterSharedMemory(chunk);
UnregisterSharedBasicMemoryChunk(chunk);
v8::PageAllocator* allocator = page_allocator(NOT_EXECUTABLE);
VirtualMemory* reservation = chunk->reserved_memory();
@@ -487,15 +492,15 @@ void MemoryAllocator::FreeReadOnlyPage(ReadOnlyPage* chunk) {
// Only read-only pages can have a non-initialized reservation object. This
// happens when the pages are remapped to multiple locations and where the
// reservation would therefore be invalid.
FreeMemory(allocator, chunk->address(),
RoundUp(chunk->size(), allocator->AllocatePageSize()));
FreeMemoryRegion(allocator, chunk->address(),
RoundUp(chunk->size(), allocator->AllocatePageSize()));
}
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
UnregisterMemoryChunk(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -516,25 +521,18 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
}
}
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
switch (mode) {
case kFull:
case kImmediately:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
case kAlreadyPooled:
// Pooled pages cannot be touched anymore as their memory is uncommitted.
// Pooled pages are not-executable.
FreeMemory(data_page_allocator(), chunk->address(),
static_cast<size_t>(MemoryChunk::kPageSize));
break;
case kPooledAndQueue:
case kConcurrentlyAndPool:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
case kPreFreeAndQueue:
case kConcurrently:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
@@ -542,23 +540,18 @@ void MemoryAllocator::Free(MemoryChunk* chunk) {
}
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kFull>(MemoryChunk* chunk);
void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
// Pooled pages cannot be touched anymore as their memory is uncommitted.
// Pooled pages are not-executable.
FreeMemoryRegion(data_page_allocator(), chunk->address(),
static_cast<size_t>(MemoryChunk::kPageSize));
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
size_t size, Space* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
if (alloc_mode == kUsePool) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
owner->identity())));
@@ -572,16 +565,6 @@ Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
ReadOnlySpace* owner) {
BasicMemoryChunk* chunk =
@@ -604,8 +587,7 @@ LargePage* MemoryAllocator::AllocateLargePage(size_t size,
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;

View File

@@ -50,9 +50,9 @@ class MemoryAllocator {
void AddMemoryChunkSafe(MemoryChunk* chunk) {
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
AddMemoryChunkSafe<kRegular>(chunk);
AddMemoryChunkSafe(kRegular, chunk);
} else {
AddMemoryChunkSafe<kNonRegular>(chunk);
AddMemoryChunkSafe(kNonRegular, chunk);
}
}
@@ -61,10 +61,10 @@ class MemoryAllocator {
// (1) Try to get a chunk that was declared as pooled and already has
// been uncommitted.
// (2) Try to steal any memory chunk of kPageSize that would've been
// unmapped.
MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
// uncommitted.
MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
if (chunk == nullptr) {
chunk = GetMemoryChunkSafe<kRegular>();
chunk = GetMemoryChunkSafe(kRegular);
if (chunk != nullptr) {
// For stolen chunks we need to manually free any allocated memory.
chunk->ReleaseAllAllocatedMemory();
@@ -90,23 +90,24 @@ class MemoryAllocator {
kRegular, // Pages of kPageSize that do not live in a CodeRange and
// can thus be used for stealing.
kNonRegular, // Large chunks and executable chunks.
kPooled, // Pooled chunks, already uncommited and ready for reuse.
kPooled, // Pooled chunks, already freed and ready for reuse.
kNumberOfChunkQueues,
};
enum class FreeMode {
// Disables any access on pooled pages before adding them to the pool.
kUncommitPooled,
kReleasePooled,
// Free pooled pages. Only used on tear down and last-resort GCs.
kFreePooled,
};
template <ChunkQueueType type>
void AddMemoryChunkSafe(MemoryChunk* chunk) {
void AddMemoryChunkSafe(ChunkQueueType type, MemoryChunk* chunk) {
base::MutexGuard guard(&mutex_);
chunks_[type].push_back(chunk);
}
template <ChunkQueueType type>
MemoryChunk* GetMemoryChunkSafe() {
MemoryChunk* GetMemoryChunkSafe(ChunkQueueType type) {
base::MutexGuard guard(&mutex_);
if (chunks_[type].empty()) return nullptr;
MemoryChunk* chunk = chunks_[type].back();
@@ -116,8 +117,8 @@ class MemoryAllocator {
bool MakeRoomForNewTasks();
template <FreeMode mode>
void PerformFreeMemoryOnQueuedChunks(JobDelegate* delegate = nullptr);
void PerformFreeMemoryOnQueuedChunks(FreeMode mode,
JobDelegate* delegate = nullptr);
void PerformFreeMemoryOnQueuedNonRegularChunks(
JobDelegate* delegate = nullptr);
@@ -132,15 +133,24 @@ class MemoryAllocator {
};
enum AllocationMode {
// Regular allocation path. Does not use pool.
kRegular,
kPooled,
// Uses the pool for allocation first.
kUsePool,
};
enum FreeMode {
kFull,
kAlreadyPooled,
kPreFreeAndQueue,
kPooledAndQueue,
// Frees page immediately on the main thread.
kImmediately,
// Frees page on background thread.
kConcurrently,
// Uncommits but does not free page on background thread. Page is added to
// pool. Used to avoid the munmap/mmap-cycle when we quickly reallocate
// pages.
kConcurrentlyAndPool,
};
V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();
@@ -160,10 +170,9 @@ class MemoryAllocator {
// Allocates a Page from the allocator. AllocationMode is used to indicate
// whether pooled allocation, which only works for MemoryChunk::kPageSize,
// should be tried first.
template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
typename SpaceType>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
V8_EXPORT_PRIVATE Page* AllocatePage(
MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
Executability executable);
LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
Executability executable);
@@ -173,9 +182,8 @@ class MemoryAllocator {
std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
template <MemoryAllocator::FreeMode mode = kFull>
EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
void Free(MemoryChunk* chunk);
V8_EXPORT_PRIVATE void Free(MemoryAllocator::FreeMode mode,
MemoryChunk* chunk);
void FreeReadOnlyPage(ReadOnlyPage* chunk);
// Returns allocated spaces in bytes.
@@ -197,13 +205,6 @@ class MemoryAllocator {
address >= highest_ever_allocated_;
}
// Returns a BasicMemoryChunk in which the memory region from commit_area_size
// to reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
size_t reserve_area_size, size_t commit_area_size,
Executability executable, BaseSpace* space);
// Returns a MemoryChunk in which the memory region from commit_area_size to
// reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
@@ -212,12 +213,6 @@ class MemoryAllocator {
Executability executable,
BaseSpace* space);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);
// Partially release |bytes_to_free| bytes starting at |start_free|. Note that
// internally memory is freed from |start_free| to the end of the reservation.
// Additional memory beyond the page is not accounted though, so
@@ -234,23 +229,10 @@ class MemoryAllocator {
}
#endif // DEBUG
// Commit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
bool CommitMemory(VirtualMemory* reservation);
// Uncommit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
bool UncommitMemory(VirtualMemory* reservation);
// Zaps a contiguous block of memory [start..(start+size)[ with
// a given zap value.
void ZapBlock(Address start, size_t size, uintptr_t zap_value);
V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
Address start,
size_t commit_size,
size_t reserved_size);
// Page allocator instance for allocating non-executable pages.
// Guaranteed to be a valid pointer.
v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }
@@ -268,16 +250,37 @@ class MemoryAllocator {
Unmapper* unmapper() { return &unmapper_; }
// Performs all necessary bookkeeping to free the memory, but does not free
// it.
void UnregisterMemory(MemoryChunk* chunk);
void UnregisterMemory(BasicMemoryChunk* chunk,
Executability executable = NOT_EXECUTABLE);
void UnregisterSharedMemory(BasicMemoryChunk* chunk);
void RegisterReadOnlyMemory(ReadOnlyPage* page);
void UnregisterReadOnlyPage(ReadOnlyPage* page);
private:
// Returns a BasicMemoryChunk in which the memory region from commit_area_size
// to reserve_area_size of the chunk area is reserved but not committed, it
// could be committed later by calling MemoryChunk::CommitArea.
V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
size_t reserve_area_size, size_t commit_area_size,
Executability executable, BaseSpace* space);
Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
size_t alignment, Executability executable,
void* hint, VirtualMemory* controller);
// Commit memory region owned by given reservation object. Returns true if
// it succeeded and false otherwise.
bool CommitMemory(VirtualMemory* reservation);
V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
Address start,
size_t commit_size,
size_t reserved_size);
// Disallows any access on memory region owned by given reservation object.
// Returns true if it succeeded and false otherwise.
bool UncommitMemory(VirtualMemory* reservation);
// Frees the given memory region.
void FreeMemoryRegion(v8::PageAllocator* page_allocator, Address addr,
size_t size);
// PreFreeMemory logically frees the object, i.e., it unregisters the
// memory, logs a delete event and adds the chunk to remembered unmapped
// pages.
@@ -289,8 +292,10 @@ class MemoryAllocator {
// See AllocatePage for public interface. Note that currently we only
// support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
template <typename SpaceType>
MemoryChunk* AllocatePagePooled(SpaceType* owner);
MemoryChunk* AllocatePagePooled(Space* owner);
// Frees a pooled page. Only used on tear-down and last-resort GCs.
void FreePooledChunk(MemoryChunk* chunk);
// Initializes pages in a chunk. Returns the first page address.
// This function and GetChunkId() are provided for the mark-compact
@@ -313,6 +318,15 @@ class MemoryAllocator {
}
}
// Performs all necessary bookkeeping to free the memory, but does not free
// it.
void UnregisterMemoryChunk(MemoryChunk* chunk);
void UnregisterSharedBasicMemoryChunk(BasicMemoryChunk* chunk);
void UnregisterBasicMemoryChunk(BasicMemoryChunk* chunk,
Executability executable = NOT_EXECUTABLE);
void RegisterReadOnlyMemory(ReadOnlyPage* page);
#ifdef DEBUG
void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
base::MutexGuard guard(&executable_memory_mutex_);
@@ -375,29 +389,6 @@ class MemoryAllocator {
DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) void MemoryAllocator::
Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) void MemoryAllocator::
Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) void MemoryAllocator::
Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
V8_EXPORT_PRIVATE) void MemoryAllocator::
Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
} // namespace internal
} // namespace v8

View File

@@ -58,8 +58,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
current_page->ClearFlags(Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
current_page);
current_page = next_current;
}
@@ -68,10 +68,10 @@ bool SemiSpace::EnsureCurrentCapacity() {
heap()->incremental_marking()->non_atomic_marking_state();
while (actual_pages < expected_pages) {
actual_pages++;
current_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
current_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
if (current_page == nullptr) return false;
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
@@ -111,10 +111,9 @@ bool SemiSpace::Commit() {
// Pages in the new spaces can be moved to the old space by the full
// collector. Therefore, they must be initialized with the same FreeList as
// old pages.
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
Page* new_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
DCHECK(!IsCommitted());
@@ -136,7 +135,8 @@ bool SemiSpace::Uncommit() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
chunk);
}
current_page_ = nullptr;
current_capacity_ = 0;
@@ -169,10 +169,9 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
IncrementalMarking::NonAtomicMarkingState* marking_state =
heap()->incremental_marking()->non_atomic_marking_state();
for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
Page* new_page =
heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
NOT_EXECUTABLE);
Page* new_page = heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kUsePool,
MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
if (new_page == nullptr) {
if (pages_added) RewindPages(pages_added);
return false;
@@ -193,7 +192,8 @@ void SemiSpace::RewindPages(int num_pages) {
while (num_pages > 0) {
MemoryChunk* last = last_page();
memory_chunk_list_.Remove(last);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
last);
num_pages--;
}
}

View File

@@ -107,7 +107,7 @@ class SemiSpace : public Space {
void PrependPage(Page* page);
void MovePageToTheEnd(Page* page);
Page* InitializePage(MemoryChunk* chunk);
Page* InitializePage(MemoryChunk* chunk) override;
// Age mark accessors.
Address age_mark() { return age_mark_; }

View File

@@ -103,7 +103,7 @@ void PagedSpace::TearDown() {
while (!memory_chunk_list_.Empty()) {
MemoryChunk* chunk = memory_chunk_list_.front();
memory_chunk_list_.Remove(chunk);
heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
}
accounting_stats_.Clear();
}
@@ -319,8 +319,8 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
}
Page* PagedSpace::AllocatePage() {
return heap()->memory_allocator()->AllocatePage(AreaSize(), this,
executable());
return heap()->memory_allocator()->AllocatePage(
MemoryAllocator::kRegular, AreaSize(), this, executable());
}
Page* PagedSpace::Expand() {
@@ -493,7 +493,7 @@ void PagedSpace::ReleasePage(Page* page) {
AccountUncommitted(page->size());
accounting_stats_.DecreaseCapacity(page->area_size());
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
}
void PagedSpace::SetReadable() {

View File

@@ -216,7 +216,7 @@ class V8_EXPORT_PRIVATE PagedSpace
void RefineAllocatedBytesAfterSweeping(Page* page);
Page* InitializePage(MemoryChunk* chunk);
Page* InitializePage(MemoryChunk* chunk) override;
void ReleasePage(Page* page);

View File

@@ -397,7 +397,7 @@ void ReadOnlySpace::Seal(SealMode ro_mode) {
DetachFromHeap();
for (ReadOnlyPage* p : pages_) {
if (ro_mode == SealMode::kDetachFromHeapAndUnregisterMemory) {
memory_allocator->UnregisterMemory(p);
memory_allocator->UnregisterReadOnlyPage(p);
}
if (ReadOnlyHeap::IsReadOnlySpaceShared()) {
p->MakeHeaderRelocatable();

View File

@@ -182,6 +182,11 @@ class V8_EXPORT_PRIVATE Space : public BaseSpace {
heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
virtual Page* InitializePage(MemoryChunk* chunk) {
UNREACHABLE();
return nullptr;
}
FreeList* free_list() { return free_list_.get(); }
Address FirstPageAddress() const { return first_page()->address(); }

View File

@@ -144,7 +144,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
memory_chunk->address() + memory_chunk->size());
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
}
static unsigned int PseudorandomAreaSize() {
@@ -201,8 +201,8 @@ TEST(MemoryAllocator) {
CHECK(!faked_space.first_page());
CHECK(!faked_space.last_page());
Page* first_page = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
MemoryAllocator::kRegular, faked_space.AreaSize(),
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
faked_space.memory_chunk_list().PushBack(first_page);
CHECK(first_page->next_page() == nullptr);
@@ -214,8 +214,8 @@ TEST(MemoryAllocator) {
// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
MemoryAllocator::kRegular, faked_space.AreaSize(),
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
total_pages++;
faked_space.memory_chunk_list().PushBack(other);
int page_count = 0;
@@ -808,8 +808,8 @@ TEST(NoMemoryForNewPage) {
LinearAllocationArea allocation_info;
OldSpace faked_space(heap, &allocation_info);
Page* page = memory_allocator->AllocatePage(
faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
NOT_EXECUTABLE);
MemoryAllocator::kRegular, faked_space.AreaSize(),
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
CHECK_NULL(page);
}

View File

@@ -312,6 +312,7 @@ bool SequentialUnmapperTest::old_flag_;
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
@@ -319,7 +320,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
@@ -341,6 +342,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
@@ -349,7 +351,7 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();