[heap] Move remaining MemoryAllocator functions

Moves several functions missed in
https://chromium-review.googlesource.com/c/v8/v8/+/2203206.

Bug: v8:10473, v8:10506
Change-Id: I882410cefe496054b71db24a65133224dc52f23c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2207144
Commit-Queue: Dan Elphick <delphick@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Auto-Submit: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67870}
Dan Elphick 2020-05-18 15:27:36 +01:00 committed by Commit Bot
parent 9e2f3f864e
commit b50d91960c
2 changed files with 361 additions and 363 deletions


@@ -6,9 +6,12 @@
#include <cinttypes>
#include "src/base/address-region.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
#include "src/logging/log.h"
namespace v8 {
@@ -369,5 +372,363 @@ Address MemoryAllocator::AllocateAlignedMemory(
return base;
}
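// Reserves room for |reserve_area_size| usable bytes but commits only
// |commit_area_size| of them (plus the header), per the layout sketched below.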
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
Space* owner) {
DCHECK_LE(commit_area_size, reserve_area_size);
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = kNullAddress;
VirtualMemory reservation;
Address area_start = kNullAddress;
Address area_end = kNullAddress;
void* address_hint =
AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
//
// MemoryChunk layout:
//
// Executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
// +----------------------------+<- base + CodePageGuardStartOffset
// | Guard |
// +----------------------------+<- area_start_
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
// +----------------------------+<- aligned at OS page boundary
// | Reserved but not committed |
// +----------------------------+<- aligned at OS page boundary
// | Guard |
// +----------------------------+<- base + chunk_size
//
// Non-executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
// +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
// +----------------------------+<- aligned at OS page boundary
// | Reserved but not committed |
// +----------------------------+<- base + chunk_size
//
if (executable == EXECUTABLE) {
chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
reserve_area_size +
MemoryChunkLayout::CodePageGuardSize(),
GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
if (base == kNullAddress) return nullptr;
// Update executable memory size.
size_executable_ += reservation.size();
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
commit_area_size, kZapValue);
}
area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
area_end = area_start + commit_area_size;
} else {
chunk_size = ::RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
GetCommitPageSize());
size_t commit_size = ::RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
ZapBlock(
base,
MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
kZapValue);
}
area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
area_end = area_start + commit_area_size;
}
// Use chunk_size for statistics because we treat reserved but
// not-yet-committed memory regions of chunks as allocated.
LOG(isolate_,
NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
// We cannot use the last chunk in the address space because we would
// overflow when comparing top and limit if this chunk is used for a
// linear allocation area.
if ((base + chunk_size) == 0u) {
CHECK(!last_chunk_.IsReserved());
last_chunk_ = std::move(reservation);
UncommitMemory(&last_chunk_);
size_ -= chunk_size;
if (executable == EXECUTABLE) {
size_executable_ -= chunk_size;
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
owner);
}
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
executable, owner, std::move(reservation));
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
return chunk;
}
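// Shrinks |chunk| by releasing |bytes_to_free| at its end. For executable
// chunks, a trailing guard page is re-established at the new area end before
// the tail of the reservation is released.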
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
chunk->set_size(chunk->size() - bytes_to_free);
chunk->set_area_end(new_area_end);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
reservation->SetPermissions(chunk->area_end(), page_size,
PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page, and releasing
// it partially starting at |start_free| will also release the potentially
// unused part behind the current page.
const size_t released_bytes = reservation->Release(start_free);
DCHECK_GE(size_, released_bytes);
size_ -= released_bytes;
}
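// Removes |chunk| from the allocator's size accounting and, if it is
// executable, from the set of registered executable memory chunks.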
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
if (chunk->executable() == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
size_executable_ -= size;
}
if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitMemory(reservation);
} else {
if (reservation->IsReserved()) {
reservation->Free();
} else {
// Only read-only pages can have an uninitialized reservation object.
DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
}
}
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
switch (mode) {
case kFull:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
case kAlreadyPooled:
// Pooled pages cannot be touched anymore as their memory is uncommitted.
// Pooled pages are non-executable.
FreeMemory(data_page_allocator(), chunk->address(),
static_cast<size_t>(MemoryChunk::kPageSize));
break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
case kPreFreeAndQueue:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
break;
}
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kFull>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
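// For kPooled allocations the unmapper's pool is tried first; if it is empty
// (or for kRegular allocations) a fresh chunk is allocated instead.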
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
owner->identity())));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
if (chunk == nullptr) {
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == nullptr) return nullptr;
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
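// Takes a pooled chunk from the unmapper and recommits it as a regular data
// page; returns nullptr if the pool is empty or the commit fails.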
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
const Address start = reinterpret_cast<Address>(chunk);
const Address area_start =
start +
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
const Address area_end = start + size;
// Pooled pages are always regular data pages.
DCHECK_NE(CODE_SPACE, owner->identity());
VirtualMemory reservation(data_page_allocator(), start, size);
if (!CommitMemory(&reservation)) return nullptr;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size, kZapValue);
}
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, std::move(reservation));
size_ += size;
return chunk;
}
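// Fills [start, start + size) with |zap_value| tagged words so that stale
// references into freed or uninitialized memory are easy to spot.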
void MemoryAllocator::ZapBlock(Address start, size_t size,
uintptr_t zap_value) {
DCHECK(IsAligned(start, kTaggedSize));
DCHECK(IsAligned(size, kTaggedSize));
MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
size >> kTaggedSizeLog2);
}
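// FLAG_v8_os_page_size is given in KB (hence the multiplication below); a
// value of 0 falls back to the OS commit page size.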
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return CommitPageSize();
}
}
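// Worked example, assuming 4 KB commit pages and a FreeSpace header of a few
// words: for addr = 0x20010 and size = 0x3000, discardable_start rounds up
// to 0x21000 and discardable_end rounds down to 0x23000, so the pages in
// [0x21000, 0x23000) can be discarded while the FreeSpace header at |addr|
// stays mapped.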
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
size_t size) {
size_t page_size = MemoryAllocator::GetCommitPageSize();
if (size < page_size + FreeSpace::kSize) {
return base::AddressRegion(0, 0);
}
Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
Address discardable_end = RoundDown(addr + size, page_size);
if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
return base::AddressRegion(discardable_start,
discardable_end - discardable_start);
}
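// The nested SetPermissions calls below unwind on failure, so a partially
// committed region is made inaccessible again before returning false.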
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
const size_t page_size = GetCommitPageSize();
// All addresses and sizes must be aligned to the commit page size.
DCHECK(IsAligned(start, page_size));
DCHECK_EQ(0, commit_size % page_size);
DCHECK_EQ(0, reserved_size % page_size);
const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
const size_t code_area_offset =
MemoryChunkLayout::ObjectStartOffsetInCodePage();
// reserved_size includes two guard regions, commit_size does not.
DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
const Address pre_guard_page = start + pre_guard_offset;
const Address code_area = start + code_area_offset;
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
PageAllocator::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
}
}
vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
}
return false;
}
} // namespace internal
} // namespace v8


@@ -11,11 +11,7 @@
#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/combined-heap.h"
#include "src/heap/concurrent-marking.h"
@@ -29,7 +25,6 @@
#include "src/heap/read-only-heap.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/init/v8.h"
#include "src/logging/counters.h"
#include "src/objects/free-space-inl.h"
@@ -135,129 +130,6 @@ void Page::MergeOldToNewRememberedSets() {
sweeping_slot_set_ = nullptr;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
size_t commit_area_size,
Executability executable,
Space* owner) {
DCHECK_LE(commit_area_size, reserve_area_size);
size_t chunk_size;
Heap* heap = isolate_->heap();
Address base = kNullAddress;
VirtualMemory reservation;
Address area_start = kNullAddress;
Address area_end = kNullAddress;
void* address_hint =
AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
//
// MemoryChunk layout:
//
// Executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
// +----------------------------+<- base + CodePageGuardStartOffset
// | Guard |
// +----------------------------+<- area_start_
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
// +----------------------------+<- aligned at OS page boundary
// | Reserved but not committed |
// +----------------------------+<- aligned at OS page boundary
// | Guard |
// +----------------------------+<- base + chunk_size
//
// Non-executable
// +----------------------------+<- base aligned with MemoryChunk::kAlignment
// | Header |
// +----------------------------+<- area_start_ (base + ObjectStartOffsetInDataPage)
// | Area |
// +----------------------------+<- area_end_ (area_start + commit_area_size)
// | Committed but not used |
// +----------------------------+<- aligned at OS page boundary
// | Reserved but not committed |
// +----------------------------+<- base + chunk_size
//
if (executable == EXECUTABLE) {
chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
reserve_area_size +
MemoryChunkLayout::CodePageGuardSize(),
GetCommitPageSize());
// Size of header (not executable) plus area (executable).
size_t commit_size = ::RoundUp(
MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
if (base == kNullAddress) return nullptr;
// Update executable memory size.
size_executable_ += reservation.size();
if (Heap::ShouldZapGarbage()) {
ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
commit_area_size, kZapValue);
}
area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
area_end = area_start + commit_area_size;
} else {
chunk_size = ::RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
GetCommitPageSize());
size_t commit_size = ::RoundUp(
MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
GetCommitPageSize());
base =
AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
executable, address_hint, &reservation);
if (base == kNullAddress) return nullptr;
if (Heap::ShouldZapGarbage()) {
ZapBlock(
base,
MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
kZapValue);
}
area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
area_end = area_start + commit_area_size;
}
// Use chunk_size for statistics because we treat reserved but
// not-yet-committed memory regions of chunks as allocated.
LOG(isolate_,
NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));
// We cannot use the last chunk in the address space because we would
// overflow when comparing top and limit if this chunk is used for a
// linear allocation area.
if ((base + chunk_size) == 0u) {
CHECK(!last_chunk_.IsReserved());
last_chunk_ = std::move(reservation);
UncommitMemory(&last_chunk_);
size_ -= chunk_size;
if (executable == EXECUTABLE) {
size_executable_ -= chunk_size;
}
CHECK(last_chunk_.IsReserved());
return AllocateChunk(reserve_area_size, commit_area_size, executable,
owner);
}
MemoryChunk* chunk =
MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
executable, owner, std::move(reservation));
if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
return chunk;
}
void Page::ResetAllocationStatistics() {
allocated_bytes_ = area_size();
wasted_memory_ = 0;
@@ -366,241 +238,6 @@ void Page::DestroyBlackArea(Address start, Address end) {
marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}
void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
size_t bytes_to_free,
Address new_area_end) {
VirtualMemory* reservation = chunk->reserved_memory();
DCHECK(reservation->IsReserved());
chunk->set_size(chunk->size() - bytes_to_free);
chunk->set_area_end(new_area_end);
if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
// Add guard page at the end.
size_t page_size = GetCommitPageSize();
DCHECK_EQ(0, chunk->area_end() % static_cast<Address>(page_size));
DCHECK_EQ(chunk->address() + chunk->size(),
chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
reservation->SetPermissions(chunk->area_end(), page_size,
PageAllocator::kNoAccess);
}
// On e.g. Windows, a reservation may be larger than a page, and releasing
// it partially starting at |start_free| will also release the potentially
// unused part behind the current page.
const size_t released_bytes = reservation->Release(start_free);
DCHECK_GE(size_, released_bytes);
size_ -= released_bytes;
}
void MemoryAllocator::UnregisterMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
VirtualMemory* reservation = chunk->reserved_memory();
const size_t size =
reservation->IsReserved() ? reservation->size() : chunk->size();
DCHECK_GE(size_, static_cast<size_t>(size));
size_ -= size;
if (chunk->executable() == EXECUTABLE) {
DCHECK_GE(size_executable_, size);
size_executable_ -= size;
}
if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
chunk->SetFlag(MemoryChunk::UNREGISTERED);
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
UnregisterMemory(chunk);
isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
chunk->IsEvacuationCandidate());
chunk->SetFlag(MemoryChunk::PRE_FREED);
}
void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
chunk->ReleaseAllAllocatedMemory();
VirtualMemory* reservation = chunk->reserved_memory();
if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
UncommitMemory(reservation);
} else {
if (reservation->IsReserved()) {
reservation->Free();
} else {
// Only read-only pages can have an uninitialized reservation object.
DCHECK_EQ(RO_SPACE, chunk->owner_identity());
FreeMemory(page_allocator(chunk->executable()), chunk->address(),
chunk->size());
}
}
}
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
switch (mode) {
case kFull:
PreFreeMemory(chunk);
PerformFreeMemory(chunk);
break;
case kAlreadyPooled:
// Pooled pages cannot be touched anymore as their memory is uncommitted.
// Pooled pages are non-executable.
FreeMemory(data_page_allocator(), chunk->address(),
static_cast<size_t>(MemoryChunk::kPageSize));
break;
case kPooledAndQueue:
DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
chunk->SetFlag(MemoryChunk::POOLED);
V8_FALLTHROUGH;
case kPreFreeAndQueue:
PreFreeMemory(chunk);
// The chunks added to this queue will be freed by a concurrent thread.
unmapper()->AddMemoryChunkSafe(chunk);
break;
}
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kFull>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE) void MemoryAllocator::Free<
MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
Executability executable) {
MemoryChunk* chunk = nullptr;
if (alloc_mode == kPooled) {
DCHECK_EQ(size, static_cast<size_t>(
MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
owner->identity())));
DCHECK_EQ(executable, NOT_EXECUTABLE);
chunk = AllocatePagePooled(owner);
}
if (chunk == nullptr) {
chunk = AllocateChunk(size, size, executable, owner);
}
if (chunk == nullptr) return nullptr;
return owner->InitializePage(chunk);
}
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
size_t size, SemiSpace* owner, Executability executable);
LargePage* MemoryAllocator::AllocateLargePage(size_t size,
LargeObjectSpace* owner,
Executability executable) {
MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
if (chunk == nullptr) return nullptr;
return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
if (chunk == nullptr) return nullptr;
const int size = MemoryChunk::kPageSize;
const Address start = reinterpret_cast<Address>(chunk);
const Address area_start =
start +
MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
const Address area_end = start + size;
// Pooled pages are always regular data pages.
DCHECK_NE(CODE_SPACE, owner->identity());
VirtualMemory reservation(data_page_allocator(), start, size);
if (!CommitMemory(&reservation)) return nullptr;
if (Heap::ShouldZapGarbage()) {
ZapBlock(start, size, kZapValue);
}
MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
NOT_EXECUTABLE, owner, std::move(reservation));
size_ += size;
return chunk;
}
void MemoryAllocator::ZapBlock(Address start, size_t size,
uintptr_t zap_value) {
DCHECK(IsAligned(start, kTaggedSize));
DCHECK(IsAligned(size, kTaggedSize));
MemsetTagged(ObjectSlot(start), Object(static_cast<Address>(zap_value)),
size >> kTaggedSizeLog2);
}
intptr_t MemoryAllocator::GetCommitPageSize() {
if (FLAG_v8_os_page_size != 0) {
DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
return FLAG_v8_os_page_size * KB;
} else {
return CommitPageSize();
}
}
base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
size_t size) {
size_t page_size = MemoryAllocator::GetCommitPageSize();
if (size < page_size + FreeSpace::kSize) {
return base::AddressRegion(0, 0);
}
Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
Address discardable_end = RoundDown(addr + size, page_size);
if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
return base::AddressRegion(discardable_start,
discardable_end - discardable_start);
}
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
size_t commit_size,
size_t reserved_size) {
const size_t page_size = GetCommitPageSize();
// All addresses and sizes must be aligned to the commit page size.
DCHECK(IsAligned(start, page_size));
DCHECK_EQ(0, commit_size % page_size);
DCHECK_EQ(0, reserved_size % page_size);
const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
const size_t code_area_offset =
MemoryChunkLayout::ObjectStartOffsetInCodePage();
// reserved_size includes two guard regions, commit_size does not.
DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
const Address pre_guard_page = start + pre_guard_offset;
const Address code_area = start + code_area_offset;
const Address post_guard_page = start + reserved_size - guard_size;
// Commit the non-executable header, from start to pre-code guard page.
if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
// Create the pre-code guard page, following the header.
if (vm->SetPermissions(pre_guard_page, page_size,
PageAllocator::kNoAccess)) {
// Commit the executable code body.
if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
PageAllocator::kReadWrite)) {
// Create the post-code guard page.
if (vm->SetPermissions(post_guard_page, page_size,
PageAllocator::kNoAccess)) {
UpdateAllocatedSpaceLimits(start, code_area + commit_size);
return true;
}
vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
}
}
vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
}
return false;
}
// -----------------------------------------------------------------------------
// PagedSpace implementation