[heap] Initialize pages with placement-new
Define ctors for BasicMemoryChunk, ReadOnlyPage, MemoryChunk, Page and
LargePage. We can use those with placement-new to initialize pages. We now
initialize chunks at once either for ReadOnlyPage, Page or LargePage.
Previously initialization happened in multiple locations starting with
BasicMemoryChunk::Initialize.

Adding ctors to these classes should improve debugging, since debug info for
classes without ctors was removed with the compiler flag `-fuse-ctor-homing`.

Change-Id: Ib842bb9b1e93a6576cad8299b7c5dbfe299baa33
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3545092
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79638}
Parent: bd7f4823c1
Commit: a847182056
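The pattern this change adopts — constructing a page header in place over a freshly reserved memory region with placement-new, instead of patching fields after the fact — can be illustrated with a small standalone sketch. The PageHeader, ReservedRegion and InitializePage names below are hypothetical stand-ins and not V8's classes; the sketch only shows how a real constructor plus placement-new centralizes initialization (and, as the commit notes, keeps debug info alive under `-fuse-ctor-homing`).

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>
    #include <new>
    #include <utility>

    // Hypothetical stand-in for a reserved virtual-memory region.
    struct ReservedRegion {
      void* base = nullptr;
      size_t size = 0;
    };

    // Hypothetical page header. All fields are established by the constructor,
    // so there is a single place that sets up the invariants.
    class PageHeader {
     public:
      PageHeader(size_t size, uintptr_t area_start, uintptr_t area_end,
                 ReservedRegion reservation)
          : size_(size),
            area_start_(area_start),
            area_end_(area_end),
            reservation_(std::move(reservation)) {}

      uintptr_t area_start() const { return area_start_; }
      size_t area_size() const { return area_end_ - area_start_; }

     private:
      size_t size_;
      uintptr_t area_start_;
      uintptr_t area_end_;
      ReservedRegion reservation_;
    };

    // Construct the header in place at the start of the reserved region,
    // analogous to `new (chunk_info->start) Page(...)` in this change.
    PageHeader* InitializePage(ReservedRegion region) {
      uintptr_t base = reinterpret_cast<uintptr_t>(region.base);
      uintptr_t area_start = base + sizeof(PageHeader);
      uintptr_t area_end = base + region.size;
      return new (region.base)
          PageHeader(region.size, area_start, area_end, std::move(region));
    }

    int main() {
      // Stand-in for a page-aligned OS reservation.
      ReservedRegion region{std::aligned_alloc(4096, 4096), 4096};
      if (region.base == nullptr) return 1;
      PageHeader* page = InitializePage(region);
      // ... use the page ...
      page->~PageHeader();  // objects created with placement-new are destroyed explicitly
      std::free(page);
      return 0;
    }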
@@ -910,7 +910,7 @@ enum class CompactionSpaceKind {

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

enum PageSize { kRegular, kLarge };
enum class PageSize { kRegular, kLarge };

enum class CodeFlushMode {
  kFlushBytecode,
@@ -9,6 +9,7 @@
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/objects/heap-object.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {
@@ -49,32 +50,19 @@ constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
constexpr BasicMemoryChunk::MainThreadFlags
    BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;

BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
                                   Address area_end) {
  size_ = size;
  area_start_ = area_start;
  area_end_ = area_end;
}

// static
BasicMemoryChunk* BasicMemoryChunk::Initialize(Heap* heap, Address base,
                                               size_t size, Address area_start,
                                               Address area_end,
                                               BaseSpace* owner,
                                               VirtualMemory reservation) {
  BasicMemoryChunk* chunk = FromAddress(base);
  DCHECK_EQ(base, chunk->address());
  new (chunk) BasicMemoryChunk(size, area_start, area_end);

  chunk->heap_ = heap;
  chunk->set_owner(owner);
  chunk->reservation_ = std::move(reservation);
  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
  chunk->allocated_bytes_ = chunk->area_size();
  chunk->wasted_memory_ = 0;
  chunk->marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();

  return chunk;
BasicMemoryChunk::BasicMemoryChunk(Heap* heap, BaseSpace* space,
                                   size_t chunk_size, Address area_start,
                                   Address area_end, VirtualMemory reservation)
    : size_(chunk_size),
      heap_(heap),
      area_start_(area_start),
      area_end_(area_end),
      allocated_bytes_(area_end - area_start),
      wasted_memory_(0),
      high_water_mark_(area_start - reinterpret_cast<Address>(this)),
      owner_(space),
      reservation_(std::move(reservation)) {
  marking_bitmap<AccessMode::NON_ATOMIC>()->Clear();
}

bool BasicMemoryChunk::InOldSpace() const {
@@ -129,7 +129,9 @@ class BasicMemoryChunk {

  static const intptr_t kAlignmentMask = kAlignment - 1;

  BasicMemoryChunk(size_t size, Address area_start, Address area_end);
  BasicMemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
                   Address area_start, Address area_end,
                   VirtualMemory reservation);

  static Address BaseAddress(Address a) { return a & ~kAlignmentMask; }

@@ -178,7 +180,7 @@ class BasicMemoryChunk {
  void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
  // Set or clear multiple flags at a time. `mask` indicates which flags are
  // should be replaced with new `flags`.
  void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
  void SetFlags(MainThreadFlags flags, MainThreadFlags mask = kAllFlagsMask) {
    main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
  }

@@ -254,11 +256,6 @@ class BasicMemoryChunk {
    return addr >= area_start() && addr <= area_end();
  }

  static BasicMemoryChunk* Initialize(Heap* heap, Address base, size_t size,
                                      Address area_start, Address area_end,
                                      BaseSpace* owner,
                                      VirtualMemory reservation);

  size_t wasted_memory() const { return wasted_memory_; }
  void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
  size_t allocated_bytes() const { return allocated_bytes_; }
@@ -30,6 +30,21 @@ namespace internal {
// order to figure out if it's a cleared weak reference or not.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

LargePage::LargePage(Heap* heap, BaseSpace* space, size_t chunk_size,
                     Address area_start, Address area_end,
                     VirtualMemory reservation, Executability executable)
    : MemoryChunk(heap, space, chunk_size, area_start, area_end,
                  std::move(reservation), executable, PageSize::kLarge) {
  STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);

  if (executable && chunk_size > LargePage::kMaxCodePageSize) {
    FATAL("Code page is too large.");
  }

  SetFlag(MemoryChunk::LARGE_PAGE);
  list_node().Initialize();
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
@@ -107,7 +122,8 @@ void LargeObjectSpace::TearDown() {

                DeleteEvent("LargeObjectChunk",
                            reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, page);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                     page);
  }
}

@@ -195,7 +211,7 @@ AllocationResult OldLargeObjectSpace::AllocateRawBackground(

LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
      this, object_size, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));
@@ -324,7 +340,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
      }
    } else {
      RemovePage(current, size);
      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, current);
      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                       current);
    }
    current = next_current;
  }
@@ -544,7 +561,8 @@ void NewLargeObjectSpace::FreeDeadObjects(
    if (is_dead(object)) {
      freed_pages = true;
      RemovePage(page, size);
      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
      heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                       page);
      if (FLAG_concurrent_marking && is_marking) {
        heap()->concurrent_marking()->ClearMemoryChunkData(page);
      }
@@ -31,6 +31,10 @@ class LargePage : public MemoryChunk {
  // already imposes on x64 and ia32 architectures.
  static const int kMaxCodePageSize = 512 * MB;

  LargePage(Heap* heap, BaseSpace* space, size_t chunk_size, Address area_start,
            Address area_end, VirtualMemory reservation,
            Executability executable);

  static LargePage* FromHeapObject(HeapObject o) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return static_cast<LargePage*>(MemoryChunk::FromHeapObject(o));
@@ -10,6 +10,7 @@
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/flags/flags.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-chunk.h"
@@ -51,8 +52,8 @@ void MemoryAllocator::TearDown() {
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Free();
  if (reserved_chunk_at_virtual_memory_limit_) {
    reserved_chunk_at_virtual_memory_limit_->Free();
  }

  code_page_allocator_ = nullptr;
@@ -140,7 +141,7 @@ void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks(
    JobDelegate* delegate) {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe(kNonRegular)) != nullptr) {
  while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kNonRegular)) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
@@ -156,17 +157,17 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe(kRegular)) != nullptr) {
  while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular)) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe(kPooled, chunk);
    if (pooled) AddMemoryChunkSafe(ChunkQueueType::kPooled, chunk);
    if (delegate && delegate->ShouldYield()) return;
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kFreePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kFreePooled we need to free them though as
    // well.
    while ((chunk = GetMemoryChunkSafe(kPooled)) != nullptr) {
    while ((chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled)) != nullptr) {
      allocator_->FreePooledChunk(chunk);
      if (delegate && delegate->ShouldYield()) return;
    }
@@ -177,20 +178,21 @@ void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks(
void MemoryAllocator::Unmapper::TearDown() {
  CHECK(!job_handle_ || !job_handle_->IsValid());
  PerformFreeMemoryOnQueuedChunks(FreeMode::kFreePooled);
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
  for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
  return chunks_[ChunkQueueType::kRegular].size() +
         chunks_[ChunkQueueType::kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
  for (int i = 0; i < ChunkQueueType::kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
@@ -202,10 +204,10 @@ size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  size_t sum = 0;
  // kPooled chunks are already uncommited. We only have to account for
  // kRegular and kNonRegular chunks.
  for (auto& chunk : chunks_[kRegular]) {
  for (auto& chunk : chunks_[ChunkQueueType::kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[kNonRegular]) {
  for (auto& chunk : chunks_[ChunkQueueType::kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
@@ -236,21 +238,42 @@ void MemoryAllocator::FreeMemoryRegion(v8::PageAllocator* page_allocator,
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    size_t chunk_size, size_t area_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
  DCHECK_LT(area_size, chunk_size);

  VirtualMemory reservation(page_allocator, chunk_size, hint, alignment);
  if (!reservation.IsReserved()) return kNullAddress;

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((reservation.address() + static_cast<Address>(chunk_size)) == 0u) {
    CHECK(!reserved_chunk_at_virtual_memory_limit_);
    reserved_chunk_at_virtual_memory_limit_ = std::move(reservation);
    CHECK(reserved_chunk_at_virtual_memory_limit_);

    // Retry reserve virtual memory.
    reservation = VirtualMemory(page_allocator, chunk_size, hint, alignment);
    if (!reservation.IsReserved()) return kNullAddress;
  }

  Address base = reservation.address();
  size_ += reservation.size();

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
    const size_t aligned_area_size = ::RoundUp(area_size, GetCommitPageSize());
    if (!SetPermissionsOnExecutableMemoryChunk(&reservation, base,
                                               aligned_area_size, chunk_size)) {
      base = kNullAddress;
    }
  } else {
    // No guard page between page header and object area. This allows us to make
    // all OS pages for both regions readable+writable at once.
    const size_t commit_size =
        ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
                  GetCommitPageSize());

    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
@@ -263,7 +286,6 @@ Address MemoryAllocator::AllocateAlignedMemory(
  // Failed to commit the body. Free the mapping and any partially committed
  // regions inside it.
  reservation.Free();
  size_ -= reserve_size;
  return kNullAddress;
}

@@ -271,151 +293,103 @@ Address MemoryAllocator::AllocateAlignedMemory(
  return base;
}

V8_EXPORT_PRIVATE BasicMemoryChunk* MemoryAllocator::AllocateBasicChunk(
    size_t reserve_area_size, size_t commit_area_size, Executability executable,
    BaseSpace* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);
size_t MemoryAllocator::ComputeChunkSize(size_t area_size,
                                         Executability executable) {
  if (executable == EXECUTABLE) {
    //
    // Executable
    // +----------------------------+<- base aligned at MemoryChunk::kAlignment
    // |           Header           |
    // +----------------------------+<- base + CodePageGuardStartOffset
    // |           Guard            |
    // +----------------------------+<- area_start_
    // |            Area            |
    // +----------------------------+<- area_end_ (area_start + area_size)
    // |   Committed but not used   |
    // +----------------------------+<- aligned at OS page boundary
    // |           Guard            |
    // +----------------------------+<- base + chunk_size
    //

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = kNullAddress;
  VirtualMemory reservation;
  Address area_start = kNullAddress;
  Address area_end = kNullAddress;
    return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                         area_size + MemoryChunkLayout::CodePageGuardSize(),
                     GetCommitPageSize());
  }

  //
  // Non-executable
  // +----------------------------+<- base aligned at MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + area_start_)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + area_size)
  // |   Committed but not used   |
  // +----------------------------+<- base + chunk_size
  //
  DCHECK_EQ(executable, NOT_EXECUTABLE);

  return ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
                   GetCommitPageSize());
}

base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
MemoryAllocator::AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
                                            Executability executable,
                                            PageSize page_size) {
#ifdef V8_COMPRESS_POINTERS
  // When pointer compression is enabled, spaces are expected to be at a
  // predictable address (see mkgrokdump) so we don't supply a hint and rely on
  // the deterministic behaviour of the BoundedPageAllocator.
  void* address_hint = nullptr;
#else
  void* address_hint =
      AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);
  void* address_hint = AlignedAddress(isolate_->heap()->GetRandomMmapAddr(),
                                      MemoryChunk::kAlignment);
#endif

  //
  // MemoryChunk layout:
  //
  // Executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- base + CodePageGuardStartOffset
  // |           Guard            |
  // +----------------------------+<- area_start_
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- aligned at OS page boundary
  // |           Guard            |
  // +----------------------------+<- base + chunk_size
  //
  // Non-executable
  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
  // |           Header           |
  // +----------------------------+<- area_start_ (base + area_start_)
  // |            Area            |
  // +----------------------------+<- area_end_ (area_start + commit_area_size)
  // |   Committed but not used   |
  // +----------------------------+<- aligned at OS page boundary
  // | Reserved but not committed |
  // +----------------------------+<- base + chunk_size
  //
  VirtualMemory reservation;
  size_t chunk_size = ComputeChunkSize(area_size, executable);
  DCHECK_EQ(chunk_size % GetCommitPageSize(), 0);

  Address base =
      AllocateAlignedMemory(chunk_size, area_size, MemoryChunk::kAlignment,
                            executable, address_hint, &reservation);
  if (base == kNullAddress) return {};

  size_ += reservation.size();

  // Update executable memory size.
  if (executable == EXECUTABLE) {
    chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                               reserve_area_size +
                               MemoryChunkLayout::CodePageGuardSize(),
                           GetCommitPageSize());

    // Size of header (not executable) plus area (executable).
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);
    if (base == kNullAddress) return nullptr;
    // Update executable memory size.
    size_executable_ += reservation.size();

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               commit_area_size, kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
        GetCommitPageSize());
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);

    if (base == kNullAddress) return nullptr;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(
          base,
          MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
          kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics because we assume that treat reserved but
  // not-yet committed memory regions of chunks as allocated.
  if (Heap::ShouldZapGarbage()) {
    if (executable == EXECUTABLE) {
      // Page header and object area is split by guard page. Zap page header
      // first.
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      // Now zap object area.
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               area_size, kZapValue);
    } else {
      DCHECK_EQ(executable, NOT_EXECUTABLE);
      // Zap both page header and object area at once. No guard page in-between.
      ZapBlock(base,
               MemoryChunkLayout::ObjectStartOffsetInDataPage() + area_size,
               kZapValue);
    }
  }

  LOG(isolate_,
      NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));

  // We cannot use the last chunk in the address space because we would
  // overflow when comparing top and limit if this chunk is used for a
  // linear allocation area.
  if ((base + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_ = std::move(reservation);
    UncommitMemory(&last_chunk_);
    size_ -= chunk_size;
    if (executable == EXECUTABLE) {
      size_executable_ -= chunk_size;
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateBasicChunk(reserve_area_size, commit_area_size, executable,
                              owner);
  }
  Address area_start = base + MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
                                  space->identity());
  Address area_end = area_start + area_size;

  BasicMemoryChunk* chunk =
      BasicMemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                                   owner, std::move(reservation));

  return chunk;
}

MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            PageSize page_size,
                                            BaseSpace* owner) {
  BasicMemoryChunk* basic_chunk = AllocateBasicChunk(
      reserve_area_size, commit_area_size, executable, owner);

  if (basic_chunk == nullptr) return nullptr;

  MemoryChunk* chunk = MemoryChunk::Initialize(basic_chunk, isolate_->heap(),
                                               executable, page_size);

#ifdef DEBUG
  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
#endif  // DEBUG
  return chunk;
  return MemoryChunkAllocationResult{
      reinterpret_cast<void*>(base), chunk_size, area_start, area_end,
      std::move(reservation),
  };
}

void MemoryAllocator::PartialFreeMemory(BasicMemoryChunk* chunk,
@@ -527,16 +501,16 @@ void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {

void MemoryAllocator::Free(MemoryAllocator::FreeMode mode, MemoryChunk* chunk) {
  switch (mode) {
    case kImmediately:
    case FreeMode::kImmediately:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kConcurrentlyAndPool:
    case FreeMode::kConcurrentlyAndPool:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      V8_FALLTHROUGH;
    case kConcurrently:
    case FreeMode::kConcurrently:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
@@ -552,29 +526,47 @@ void MemoryAllocator::FreePooledChunk(MemoryChunk* chunk) {
}

Page* MemoryAllocator::AllocatePage(MemoryAllocator::AllocationMode alloc_mode,
                                    size_t size, Space* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kUsePool) {
                                    Space* space, Executability executable) {
  size_t size =
      MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space->identity());
  base::Optional<MemoryChunkAllocationResult> chunk_info;
  if (alloc_mode == AllocationMode::kUsePool) {
    DCHECK_EQ(size, static_cast<size_t>(
                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                            owner->identity())));
                            space->identity())));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
    chunk_info = AllocateUninitializedPageFromPool(space);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, PageSize::kRegular, owner);

  if (!chunk_info) {
    chunk_info =
        AllocateUninitializedChunk(space, size, executable, PageSize::kRegular);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk);

  if (!chunk_info) return nullptr;

  Page* page = new (chunk_info->start) Page(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation), executable);

#ifdef DEBUG
  if (page->executable()) RegisterExecutableMemoryChunk(page);
#endif  // DEBUG

  space->InitializePage(page);
  return page;
}

ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(size_t size,
                                                    ReadOnlySpace* owner) {
  BasicMemoryChunk* chunk =
      AllocateBasicChunk(size, size, NOT_EXECUTABLE, owner);
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk);
ReadOnlyPage* MemoryAllocator::AllocateReadOnlyPage(ReadOnlySpace* space) {
  DCHECK_EQ(space->identity(), RO_SPACE);
  size_t size = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(RO_SPACE);
  base::Optional<MemoryChunkAllocationResult> chunk_info =
      AllocateUninitializedChunk(space, size, NOT_EXECUTABLE,
                                 PageSize::kRegular);
  if (!chunk_info) return nullptr;
  return new (chunk_info->start) ReadOnlyPage(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation));
}

std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping>
@@ -583,38 +575,48 @@ MemoryAllocator::RemapSharedPage(
  return shared_memory->RemapTo(reinterpret_cast<void*>(new_address));
}

LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                              LargeObjectSpace* owner,
LargePage* MemoryAllocator::AllocateLargePage(LargeObjectSpace* space,
                                              size_t object_size,
                                              Executability executable) {
  MemoryChunk* chunk =
      AllocateChunk(size, size, executable, PageSize::kLarge, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable);
  base::Optional<MemoryChunkAllocationResult> chunk_info =
      AllocateUninitializedChunk(space, object_size, executable,
                                 PageSize::kLarge);

  if (!chunk_info) return nullptr;

  LargePage* page = new (chunk_info->start) LargePage(
      isolate_->heap(), space, chunk_info->size, chunk_info->area_start,
      chunk_info->area_end, std::move(chunk_info->reservation), executable);

#ifdef DEBUG
  if (page->executable()) RegisterExecutableMemoryChunk(page);
#endif  // DEBUG

  return page;
}

MemoryChunk* MemoryAllocator::AllocatePagePooled(Space* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
base::Optional<MemoryAllocator::MemoryChunkAllocationResult>
MemoryAllocator::AllocateUninitializedPageFromPool(Space* space) {
  void* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return {};
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start =
      start +
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
  const Address area_end = start + size;
  // Pooled pages are always regular data pages.
  DCHECK_NE(CODE_SPACE, owner->identity());
  DCHECK_NE(CODE_SPACE, space->identity());
  VirtualMemory reservation(data_page_allocator(), start, size);
  if (!CommitMemory(&reservation)) return nullptr;
  if (!CommitMemory(&reservation)) return {};
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }
  BasicMemoryChunk* basic_chunk =
      BasicMemoryChunk::Initialize(isolate_->heap(), start, size, area_start,
                                   area_end, owner, std::move(reservation));
  MemoryChunk::Initialize(basic_chunk, isolate_->heap(), NOT_EXECUTABLE,
                          PageSize::kRegular);

  size_ += size;
  return chunk;
  return MemoryChunkAllocationResult{
      chunk, size, area_start, area_end, std::move(reservation),
  };
}

void MemoryAllocator::ZapBlock(Address start, size_t size,
@@ -645,42 +647,50 @@ base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
                           discardable_end - discardable_start);
}

bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
                                                            Address start,
                                                            size_t area_size,
                                                            size_t chunk_size) {
  const size_t page_size = GetCommitPageSize();

  // All addresses and sizes must be aligned to the commit page size.
  DCHECK(IsAligned(start, page_size));
  DCHECK_EQ(0, commit_size % page_size);
  DCHECK_EQ(0, reserved_size % page_size);
  DCHECK_EQ(0, area_size % page_size);
  DCHECK_EQ(0, chunk_size % page_size);

  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
  const size_t code_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInCodePage();
  // reserved_size includes two guard regions, commit_size does not.
  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);

  DCHECK_EQ(pre_guard_offset + guard_size + area_size + guard_size, chunk_size);

  const Address pre_guard_page = start + pre_guard_offset;
  const Address code_area = start + code_area_offset;
  const Address post_guard_page = start + reserved_size - guard_size;
  const Address post_guard_page = start + chunk_size - guard_size;

  // Commit the non-executable header, from start to pre-code guard page.
  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    // Create the pre-code guard page, following the header.
    if (vm->SetPermissions(pre_guard_page, page_size,
                           PageAllocator::kNoAccess)) {
      // Commit the executable code body.
      if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
      if (vm->SetPermissions(code_area, area_size,
                             MemoryChunk::GetCodeModificationPermission())) {
        // Create the post-code guard page.
        if (vm->SetPermissions(post_guard_page, page_size,
                               PageAllocator::kNoAccess)) {
          UpdateAllocatedSpaceLimits(start, code_area + commit_size);
          UpdateAllocatedSpaceLimits(start, code_area + area_size);
          return true;
        }
        vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);

        vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
      }
    }

    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
  }

  return false;
}

@@ -17,6 +17,8 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-range.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
@@ -44,15 +46,15 @@ class MemoryAllocator {

    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap), allocator_(allocator) {
      chunks_[kRegular].reserve(kReservedQueueingSlots);
      chunks_[kPooled].reserve(kReservedQueueingSlots);
      chunks_[ChunkQueueType::kRegular].reserve(kReservedQueueingSlots);
      chunks_[ChunkQueueType::kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
        AddMemoryChunkSafe(kRegular, chunk);
        AddMemoryChunkSafe(ChunkQueueType::kRegular, chunk);
      } else {
        AddMemoryChunkSafe(kNonRegular, chunk);
        AddMemoryChunkSafe(ChunkQueueType::kNonRegular, chunk);
      }
    }

@@ -62,9 +64,9 @@ class MemoryAllocator {
      // been uncommitted.
      // (2) Try to steal any memory chunk of kPageSize that would've been
      // uncommitted.
      MemoryChunk* chunk = GetMemoryChunkSafe(kPooled);
      MemoryChunk* chunk = GetMemoryChunkSafe(ChunkQueueType::kPooled);
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe(kRegular);
        chunk = GetMemoryChunkSafe(ChunkQueueType::kRegular);
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllAllocatedMemory();
@@ -126,13 +128,13 @@ class MemoryAllocator {
    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    std::vector<MemoryChunk*> chunks_[ChunkQueueType::kNumberOfChunkQueues];
    std::unique_ptr<v8::JobHandle> job_handle_;

    friend class MemoryAllocator;
  };

  enum AllocationMode {
  enum class AllocationMode {
    // Regular allocation path. Does not use pool.
    kRegular,

@@ -140,7 +142,7 @@ class MemoryAllocator {
    kUsePool,
  };

  enum FreeMode {
  enum class FreeMode {
    // Frees page immediately on the main thread.
    kImmediately,

@@ -182,13 +184,14 @@ class MemoryAllocator {
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  V8_EXPORT_PRIVATE Page* AllocatePage(
      MemoryAllocator::AllocationMode alloc_mode, size_t size, Space* owner,
      MemoryAllocator::AllocationMode alloc_mode, Space* space,
      Executability executable);

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);
  V8_EXPORT_PRIVATE LargePage* AllocateLargePage(LargeObjectSpace* space,
                                                 size_t object_size,
                                                 Executability executable);

  ReadOnlyPage* AllocateReadOnlyPage(size_t size, ReadOnlySpace* owner);
  ReadOnlyPage* AllocateReadOnlyPage(ReadOnlySpace* space);

  std::unique_ptr<::v8::PageAllocator::SharedMemoryMapping> RemapSharedPage(
      ::v8::PageAllocator::SharedMemory* shared_memory, Address new_address);
@@ -216,15 +219,6 @@ class MemoryAllocator {
           address >= highest_ever_allocated_;
  }

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               PageSize page_size,
                                               BaseSpace* space);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted though, so
@@ -265,14 +259,29 @@ class MemoryAllocator {
  void UnregisterReadOnlyPage(ReadOnlyPage* page);

 private:
  // Returns a BasicMemoryChunk in which the memory region from commit_area_size
  // to reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE BasicMemoryChunk* AllocateBasicChunk(
      size_t reserve_area_size, size_t commit_area_size,
      Executability executable, BaseSpace* space);
  // Used to store all data about MemoryChunk allocation, e.g. in
  // AllocateUninitializedChunk.
  struct MemoryChunkAllocationResult {
    void* start;
    size_t size;
    size_t area_start;
    size_t area_end;
    VirtualMemory reservation;
  };

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
  // Computes the size of a MemoryChunk from the size of the object_area and
  // whether the chunk is executable or not.
  static size_t ComputeChunkSize(size_t area_size, Executability executable);

  // Internal allocation method for all pages/memory chunks. Returns data about
  // the unintialized memory region.
  V8_WARN_UNUSED_RESULT base::Optional<MemoryChunkAllocationResult>
  AllocateUninitializedChunk(BaseSpace* space, size_t area_size,
                             Executability executable, PageSize page_size);

  // Internal raw allocation method that allocates an aligned MemoryChunk and
  // sets the right memory permissions.
  Address AllocateAlignedMemory(size_t chunk_size, size_t area_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

@@ -280,10 +289,11 @@ class MemoryAllocator {
  // it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);
  // Sets memory permissions on executable memory chunks. This entails page
  // header (RW), guard pages (no access) and the object area (code modification
  // permissions).
  V8_WARN_UNUSED_RESULT bool SetPermissionsOnExecutableMemoryChunk(
      VirtualMemory* vm, Address start, size_t area_size, size_t reserved_size);

  // Disallows any access on memory region owned by given reservation object.
  // Returns true if it succeeded and false otherwise.
@@ -304,7 +314,8 @@ class MemoryAllocator {

  // See AllocatePage for public interface. Note that currently we only
  // support pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  MemoryChunk* AllocatePagePooled(Space* owner);
  base::Optional<MemoryChunkAllocationResult> AllocateUninitializedPageFromPool(
      Space* space);

  // Frees a pooled page. Only used on tear-down and last-resort GCs.
  void FreePooledChunk(MemoryChunk* chunk);
@@ -314,7 +325,7 @@ class MemoryAllocator {
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);
                               PagedSpace* space);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
@@ -385,7 +396,7 @@ class MemoryAllocator {
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  base::Optional<VirtualMemory> reserved_chunk_at_virtual_memory_limit_;
  Unmapper unmapper_;

#ifdef DEBUG
@@ -4,6 +4,7 @@

#include "src/heap/memory-chunk-layout.h"

#include "src/common/globals.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
@@ -46,7 +47,7 @@ intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {

size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
    AllocationSpace space) {
  if (space == CODE_SPACE) {
  if (space == CODE_SPACE || space == CODE_LO_SPACE) {
    return ObjectStartOffsetInCodePage();
  }
  return ObjectStartOffsetInDataPage();
@@ -7,6 +7,7 @@
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/common/globals.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
@@ -118,92 +119,83 @@ PageAllocator::Permission DefaultWritableCodePermissions() {

}  // namespace

MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                     Executability executable,
                                     PageSize page_size) {
  MemoryChunk* chunk = static_cast<MemoryChunk*>(basic_chunk);

  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_SHARED],
                                       nullptr);
MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
                         Address area_start, Address area_end,
                         VirtualMemory reservation, Executability executable,
                         PageSize page_size)
    : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
                       std::move(reservation)) {
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_SHARED], nullptr);
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_CODE],
                                         nullptr);
    base::AsAtomicPointer::Release_Store(&slot_set_[OLD_TO_CODE], nullptr);
  }
  base::AsAtomicPointer::Release_Store(&chunk->sweeping_slot_set_, nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
  base::AsAtomicPointer::Release_Store(&sweeping_slot_set_, nullptr);
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&typed_slot_set_[OLD_TO_SHARED],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_SHARED],
                                       nullptr);
  chunk->invalidated_slots_[OLD_TO_NEW] = nullptr;
  chunk->invalidated_slots_[OLD_TO_OLD] = nullptr;
  invalidated_slots_[OLD_TO_NEW] = nullptr;
  invalidated_slots_[OLD_TO_OLD] = nullptr;
  if (V8_EXTERNAL_CODE_SPACE_BOOL) {
    // Not actually used but initialize anyway for predictability.
    chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
    invalidated_slots_[OLD_TO_CODE] = nullptr;
  }
  chunk->progress_bar_.Initialize();
  chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
  chunk->page_protection_change_mutex_ = new base::Mutex();
  chunk->write_unprotect_counter_ = 0;
  chunk->mutex_ = new base::Mutex();
  chunk->young_generation_bitmap_ = nullptr;
  progress_bar_.Initialize();
  set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
  page_protection_change_mutex_ = new base::Mutex();
  write_unprotect_counter_ = 0;
  mutex_ = new base::Mutex();
  young_generation_bitmap_ = nullptr;

  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
      0;
  chunk->external_backing_store_bytes_
      [ExternalBackingStoreType::kExternalString] = 0;
  external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
  external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] = 0;

  chunk->categories_ = nullptr;
  categories_ = nullptr;

  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
  heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(this,
                                                                         0);
  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
    SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
      chunk->write_unprotect_counter_ =
      write_unprotect_counter_ =
          heap->code_space_memory_modification_scope_depth();
    } else {
      size_t page_size = MemoryAllocator::GetCommitPageSize();
      DCHECK(IsAligned(chunk->area_start(), page_size));
      size_t area_size =
          RoundUp(chunk->area_end() - chunk->area_start(), page_size);
      CHECK(chunk->reservation_.SetPermissions(
          chunk->area_start(), area_size, DefaultWritableCodePermissions()));
      DCHECK(IsAligned(area_start_, page_size));
      size_t area_size = RoundUp(area_end_ - area_start_, page_size);
      CHECK(reservation_.SetPermissions(area_start_, area_size,
                                        DefaultWritableCodePermissions()));
    }
  }

  if (chunk->owner()->identity() == CODE_SPACE) {
    chunk->code_object_registry_ = new CodeObjectRegistry();
  if (owner()->identity() == CODE_SPACE) {
    code_object_registry_ = new CodeObjectRegistry();
  } else {
    chunk->code_object_registry_ = nullptr;
    code_object_registry_ = nullptr;
  }

  chunk->possibly_empty_buckets_.Initialize();
  possibly_empty_buckets_.Initialize();

  if (page_size == PageSize::kRegular) {
    chunk->active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
                                     MemoryAllocator::GetCommitPageSizeBits(),
                                     chunk->size());
    active_system_pages_.Init(MemoryChunkLayout::kMemoryChunkHeaderSize,
                              MemoryAllocator::GetCommitPageSizeBits(), size());
  } else {
    // We do not track active system pages for large pages.
    chunk->active_system_pages_.Clear();
    active_system_pages_.Clear();
  }

  // All pages of a shared heap need to be marked with this flag.
  if (heap->IsShared()) chunk->SetFlag(IN_SHARED_HEAP);
  if (heap->IsShared()) SetFlag(MemoryChunk::IN_SHARED_HEAP);

#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
  chunk->object_start_bitmap_ = ObjectStartBitmap(chunk->area_start());
#endif

#ifdef DEBUG
  ValidateOffsets(chunk);
  ValidateOffsets(this);
#endif

  return chunk;
}

size_t MemoryChunk::CommittedPhysicalMemory() {
@@ -53,6 +53,10 @@ class MemoryChunk : public BasicMemoryChunk {
  // Maximum number of nested code memory modification scopes.
  static const int kMaxWriteUnprotectCounter = 3;

  MemoryChunk(Heap* heap, BaseSpace* space, size_t size, Address area_start,
              Address area_end, VirtualMemory reservation,
              Executability executable, PageSize page_size);

  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return cast(BasicMemoryChunk::FromAddress(a));
@@ -219,9 +223,6 @@ class MemoryChunk : public BasicMemoryChunk {
#endif

 protected:
  static MemoryChunk* Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
                                 Executability executable, PageSize page_size);

  // Release all memory allocated by the chunk. Should be called when memory
  // chunk is about to be freed.
  void ReleaseAllAllocatedMemory();
@@ -70,8 +70,8 @@ bool SemiSpace::EnsureCurrentCapacity() {
      // Clear new space flags to avoid this page being treated as a new
      // space page that is potentially being swept.
      current_page->ClearFlags(Page::kIsInYoungGenerationMask);
      heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
                                       current_page);
      heap()->memory_allocator()->Free(
          MemoryAllocator::FreeMode::kConcurrentlyAndPool, current_page);
      current_page = next_current;
    }

@@ -81,16 +81,14 @@ bool SemiSpace::EnsureCurrentCapacity() {
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page = heap()->memory_allocator()->AllocatePage(
          MemoryAllocator::kUsePool,
          MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
          NOT_EXECUTABLE);
          MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      AccountCommitted(Page::kPageSize);
      IncrementCommittedPhysicalMemory(current_page->CommittedPhysicalMemory());
      memory_chunk_list_.PushBack(current_page);
      marking_state->ClearLiveness(current_page);
      current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
      current_page->SetFlags(first_page()->GetFlags());
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   static_cast<int>(current_page->area_size()),
                                   ClearRecordedSlots::kNo);
@@ -128,8 +126,7 @@ bool SemiSpace::Commit() {
    // collector. Therefore, they must be initialized with the same FreeList as
    // old pages.
    Page* new_page = heap()->memory_allocator()->AllocatePage(
        MemoryAllocator::kUsePool,
        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
        MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      DCHECK(!IsCommitted());
@@ -155,8 +152,8 @@ bool SemiSpace::Uncommit() {
    MemoryChunk* chunk = memory_chunk_list_.front();
    DecrementCommittedPhysicalMemory(chunk->CommittedPhysicalMemory());
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
                                     chunk);
    heap()->memory_allocator()->Free(
        MemoryAllocator::FreeMode::kConcurrentlyAndPool, chunk);
  }
  current_page_ = nullptr;
  current_capacity_ = 0;
@@ -191,8 +188,7 @@ bool SemiSpace::GrowTo(size_t new_capacity) {
      heap()->incremental_marking()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page = heap()->memory_allocator()->AllocatePage(
        MemoryAllocator::kUsePool,
        MemoryChunkLayout::AllocatableMemoryInDataPage(), this, NOT_EXECUTABLE);
        MemoryAllocator::AllocationMode::kUsePool, this, NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
@@ -215,8 +211,8 @@ void SemiSpace::RewindPages(int num_pages) {
    MemoryChunk* last = last_page();
    memory_chunk_list_.Remove(last);
    DecrementCommittedPhysicalMemory(last->CommittedPhysicalMemory());
    heap()->memory_allocator()->Free(MemoryAllocator::kConcurrentlyAndPool,
                                     last);
    heap()->memory_allocator()->Free(
        MemoryAllocator::FreeMode::kConcurrentlyAndPool, last);
    num_pages--;
  }
}
@@ -278,7 +274,7 @@ void SemiSpace::RemovePage(Page* page) {
}

void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
  page->SetFlags(current_page()->GetFlags());
  page->set_owner(this);
  memory_chunk_list_.PushFront(page);
  current_capacity_ += Page::kPageSize;
@@ -107,7 +107,8 @@ void PagedSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free(MemoryAllocator::kImmediately, chunk);
    heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kImmediately,
                                     chunk);
  }
  accounting_stats_.Clear();
}
@@ -351,13 +352,9 @@ void PagedSpace::ShrinkImmortalImmovablePages() {
    }
  }

Page* PagedSpace::AllocatePage() {
  return heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::kRegular, AreaSize(), this, executable());
}

Page* PagedSpace::Expand() {
  Page* page = AllocatePage();
  Page* page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::AllocationMode::kRegular, this, executable());
  if (page == nullptr) return nullptr;
  ConcurrentAllocationMutex guard(this);
  AddPage(page);
@@ -368,7 +365,8 @@ Page* PagedSpace::Expand() {

base::Optional<std::pair<Address, size_t>> PagedSpace::ExpandBackground(
    size_t size_in_bytes) {
  Page* page = AllocatePage();
  Page* page = heap()->memory_allocator()->AllocatePage(
      MemoryAllocator::AllocationMode::kRegular, this, executable());
  if (page == nullptr) return {};
  base::MutexGuard lock(&space_mutex_);
  AddPage(page);
@@ -528,7 +526,8 @@ void PagedSpace::ReleasePage(Page* page) {
  AccountUncommitted(page->size());
  DecrementCommittedPhysicalMemory(page->CommittedPhysicalMemory());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free(MemoryAllocator::kConcurrently, page);
  heap()->memory_allocator()->Free(MemoryAllocator::FreeMode::kConcurrently,
                                   page);
}

void PagedSpace::SetReadable() {
@@ -391,8 +391,6 @@ class V8_EXPORT_PRIVATE PagedSpace
  base::Optional<std::pair<Address, size_t>> ExpandBackground(
      size_t size_in_bytes);

  Page* AllocatePage();

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
@@ -329,6 +329,19 @@ void ReadOnlySpace::DetachPagesAndAddToArtifacts(
  artifacts->Initialize(heap->isolate(), std::move(pages_), accounting_stats_);
}

ReadOnlyPage::ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
                           Address area_start, Address area_end,
                           VirtualMemory reservation)
    : BasicMemoryChunk(heap, space, chunk_size, area_start, area_end,
                       std::move(reservation)) {
  allocated_bytes_ = 0;
  SetFlags(Flag::NEVER_EVACUATE | Flag::READ_ONLY_HEAP);
  heap->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(this)
      ->MarkAllBits();
}

void ReadOnlyPage::MakeHeaderRelocatable() {
  heap_ = nullptr;
  owner_ = nullptr;
@@ -612,7 +625,7 @@ void ReadOnlySpace::EnsureSpaceForAllocation(int size_in_bytes) {
  FreeLinearAllocationArea();

  BasicMemoryChunk* chunk =
      heap()->memory_allocator()->AllocateReadOnlyPage(AreaSize(), this);
      heap()->memory_allocator()->AllocateReadOnlyPage(this);
  capacity_ += AreaSize();

  accounting_stats_.IncreaseCapacity(chunk->area_size());
@@ -754,20 +767,6 @@ void ReadOnlySpace::ShrinkPages() {
  limit_ = pages_.back()->area_end();
}

ReadOnlyPage* ReadOnlySpace::InitializePage(BasicMemoryChunk* chunk) {
  ReadOnlyPage* page = reinterpret_cast<ReadOnlyPage*>(chunk);
  page->allocated_bytes_ = 0;
  page->SetFlag(BasicMemoryChunk::Flag::NEVER_EVACUATE);
  heap()
      ->incremental_marking()
      ->non_atomic_marking_state()
      ->bitmap(chunk)
      ->MarkAllBits();
  chunk->SetFlag(BasicMemoryChunk::READ_ONLY_HEAP);

  return page;
}

SharedReadOnlySpace::SharedReadOnlySpace(
    Heap* heap, PointerCompressedReadOnlyArtifacts* artifacts)
    : SharedReadOnlySpace(heap) {
@ -26,6 +26,9 @@ class SnapshotData;

class ReadOnlyPage : public BasicMemoryChunk {
public:
ReadOnlyPage(Heap* heap, BaseSpace* space, size_t chunk_size,
Address area_start, Address area_end, VirtualMemory reservation);

// Clears any pointers in the header that point out of the page that would
// otherwise make the header non-relocatable.
void MakeHeaderRelocatable();
@ -229,8 +232,6 @@ class ReadOnlySpace : public BaseSpace {
// Return size of allocatable area on a page in this space.
int AreaSize() const { return static_cast<int>(area_size_); }

ReadOnlyPage* InitializePage(BasicMemoryChunk* chunk);

Address FirstPageAddress() const { return pages_.front()->address(); }

protected:
@ -50,6 +50,12 @@ STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
// static
constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;

Page::Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation,
Executability executable)
: MemoryChunk(heap, space, size, area_start, area_end,
std::move(reservation), executable, PageSize::kRegular) {}

void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@ -224,6 +224,9 @@ class Page : public MemoryChunk {
MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);

Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
Address area_end, VirtualMemory reservation, Executability executable);

// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
// is in fact in a page.
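The Page ctor above forwards to MemoryChunk and fixes the size class to PageSize::kRegular, so only the executability still varies at the construction site. A rough sketch of in-place construction under the same assumptions as the earlier ReadOnlyPage example (placeholder variables, not the allocator's real code):

    // A regular, non-executable page header built in place; PageSize::kRegular
    // is supplied by the Page ctor itself via its MemoryChunk base.
    Page* page = new (reinterpret_cast<void*>(base))
        Page(heap, old_space, size, area_start, area_end, std::move(reservation),
             NOT_EXECUTABLE);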
@ -113,9 +113,8 @@ class V8_NODISCARD TestCodePageAllocatorScope {

static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
Executability executable, PageSize page_size,
Space* space) {
size_t area_size, Executability executable,
PageSize page_size, LargeObjectSpace* space) {
TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
@ -129,23 +128,23 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
size_t guard_size =
(executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;

MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
reserve_area_size, commit_area_size, executable, page_size, space);
MemoryChunk* memory_chunk =
memory_allocator->AllocateLargePage(space, area_size, executable);
size_t reserved_size =
((executable == EXECUTABLE))
? allocatable_memory_area_offset +
RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
RoundUp(area_size, page_allocator->CommitPageSize()) +
guard_size
: RoundUp(allocatable_memory_area_offset + reserve_area_size,
: RoundUp(allocatable_memory_area_offset + area_size,
page_allocator->CommitPageSize());
CHECK(memory_chunk->size() == reserved_size);
CHECK(memory_chunk->area_start() <
memory_chunk->address() + memory_chunk->size());
CHECK(memory_chunk->area_end() <=
memory_chunk->address() + memory_chunk->size());
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
CHECK(static_cast<size_t>(memory_chunk->area_size()) == area_size);

memory_allocator->Free(MemoryAllocator::kImmediately, memory_chunk);
memory_allocator->Free(MemoryAllocator::FreeMode::kImmediately, memory_chunk);
}

static unsigned int PseudorandomAreaSize() {
@ -160,12 +159,10 @@ TEST(MemoryChunk) {
Heap* heap = isolate->heap();

v8::PageAllocator* page_allocator = GetPlatformPageAllocator();

size_t reserve_area_size = 1 * MB;
size_t initial_commit_area_size;
size_t area_size;

for (int i = 0; i < 100; i++) {
initial_commit_area_size =
area_size =
RoundUp(PseudorandomAreaSize(), page_allocator->CommitPageSize());

// With CodeRange.
@ -179,13 +176,11 @@ TEST(MemoryChunk) {
code_range_reservation.size(), MemoryChunk::kAlignment,
base::PageInitializationMode::kAllocatedPagesCanBeUninitialized);

VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, EXECUTABLE, PageSize::kLarge,
heap->code_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
EXECUTABLE, PageSize::kLarge, heap->code_lo_space());

VerifyMemoryChunk(isolate, heap, &code_page_allocator, reserve_area_size,
initial_commit_area_size, NOT_EXECUTABLE,
PageSize::kLarge, heap->old_space());
VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
NOT_EXECUTABLE, PageSize::kLarge, heap->lo_space());
}
}
@ -203,7 +198,7 @@ TEST(MemoryAllocator) {
CHECK(!faked_space.first_page());
CHECK(!faked_space.last_page());
Page* first_page = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);

faked_space.memory_chunk_list().PushBack(first_page);
@ -216,7 +211,7 @@ TEST(MemoryAllocator) {

// Again, we should get n or n - 1 pages.
Page* other = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);
total_pages++;
faked_space.memory_chunk_list().PushBack(other);
@ -813,7 +808,7 @@ TEST(NoMemoryForNewPage) {
LinearAllocationArea allocation_info;
OldSpace faked_space(heap, &allocation_info);
Page* page = memory_allocator->AllocatePage(
MemoryAllocator::kRegular, faked_space.AreaSize(),
MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(&faked_space), NOT_EXECUTABLE);

CHECK_NULL(page);
@ -311,16 +311,15 @@ bool SequentialUnmapperTest::old_flag_;
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page =
allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->FreeQueuedChunks();
@ -341,17 +340,16 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
// See v8:5945.
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
if (FLAG_enable_third_party_heap) return;
Page* page = allocator()->AllocatePage(
MemoryAllocator::kRegular,
MemoryChunkLayout::AllocatableMemoryInDataPage(),
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
Page* page =
allocator()->AllocatePage(MemoryAllocator::AllocationMode::kRegular,
static_cast<PagedSpace*>(heap()->old_space()),
Executability::NOT_EXECUTABLE);
EXPECT_NE(nullptr, page);
const size_t page_size = tracking_page_allocator()->AllocatePageSize();
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);

allocator()->Free(MemoryAllocator::kConcurrentlyAndPool, page);
allocator()->Free(MemoryAllocator::FreeMode::kConcurrentlyAndPool, page);
tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
PageAllocator::kReadWrite);
unmapper()->TearDown();