[heap] Split out memory-allocator.h
Splits out MemoryAllocator and CodeRangeAddressHint into memory-allocator.h

Bug: v8:10473, v8:10506
Change-Id: I0855f23dd0374ddd68493ee05af7a3a00c84660d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2203206
Auto-Submit: Dan Elphick <delphick@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Cr-Commit-Position: refs/heads/master@{#67857}

This commit is contained in:
parent 821d97627c
commit dfabc70a99

BUILD.gn (2 lines changed)
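With the allocator in its own header, heap code that only needs MemoryAllocator or CodeRangeAddressHint can include src/heap/memory-allocator.h directly instead of picking it up through spaces.h. A minimal sketch of such a consumer follows; the helper function and its name are illustrative only, while unmapper() and NumberOfChunks() are the APIs declared in the new header below.

// Illustrative only: a hypothetical consumer of the split-out header.
#include "src/heap/memory-allocator.h"

namespace v8 {
namespace internal {

// Reports how many memory chunks are still queued for unmapping. Uses only
// MemoryAllocator::unmapper() and Unmapper::NumberOfChunks() from the new
// header; no other part of spaces.h is needed.
int QueuedUnmapperChunks(MemoryAllocator* allocator) {
  return allocator->unmapper()->NumberOfChunks();
}

}  // namespace internal
}  // namespace v8
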
@@ -2458,6 +2458,8 @@ v8_source_set("v8_base_without_compiler") {
    "src/heap/marking-worklist.h",
    "src/heap/marking.cc",
    "src/heap/marking.h",
    "src/heap/memory-allocator.cc",
    "src/heap/memory-allocator.h",
    "src/heap/memory-chunk-inl.h",
    "src/heap/memory-chunk.cc",
    "src/heap/memory-chunk.h",

@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/init/setup-isolate.h"

#include "src/builtins/builtins.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/interface-descriptors.h"

@@ -12,7 +10,8 @@
#include "src/compiler/code-assembler.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-inl.h"  // For MemoryAllocator::code_range.
#include "src/heap/heap-inl.h"  // For Heap::code_range.
#include "src/init/setup-isolate.h"
#include "src/interpreter/bytecodes.h"
#include "src/interpreter/interpreter-generator.h"
#include "src/interpreter/interpreter.h"

@@ -24,6 +24,7 @@
#include "src/execution/isolate-data.h"
#include "src/execution/isolate.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/new-spaces-inl.h"
#include "src/heap/paged-spaces-inl.h"

@@ -9,6 +9,7 @@
#include "src/heap/incremental-marking.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/remembered-set-inl.h"
#include "src/heap/slot-set.h"

src/heap/memory-allocator.cc (new file, 373 lines)
@@ -0,0 +1,373 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-allocator.h"

#include <cinttypes>

#include "src/execution/isolate.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/logging/log.h"

namespace v8 {
namespace internal {

static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
    LAZY_INSTANCE_INITIALIZER;

Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  auto it = recently_freed_.find(code_range_size);
  if (it == recently_freed_.end() || it->second.empty()) {
    return reinterpret_cast<Address>(GetRandomMmapAddr());
  }
  Address result = it->second.back();
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

// -----------------------------------------------------------------------------
// MemoryAllocator
//

MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
                                 size_t code_range_size)
    : isolate_(isolate),
      data_page_allocator_(isolate->page_allocator()),
      code_page_allocator_(nullptr),
      capacity_(RoundUp(capacity, Page::kPageSize)),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
}

void MemoryAllocator::InitializeCodePageAllocator(
    v8::PageAllocator* page_allocator, size_t requested) {
  DCHECK_NULL(code_page_allocator_instance_.get());

  code_page_allocator_ = page_allocator;

  if (requested == 0) {
    if (!isolate_->RequiresCodeRange()) return;
    // When a target requires the code range feature, we put all code objects
    // in a kMaximalCodeRangeSize range of virtual address space, so that
    // they can call each other with near calls.
    requested = kMaximalCodeRangeSize;
  } else if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fullfilling both reserved pages requirement and huge code area
    // alignments is not supported (requires re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);

  Address hint =
      RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
                page_allocator->AllocatePageSize());
  VirtualMemory reservation(
      page_allocator, requested, reinterpret_cast<void*>(hint),
      Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
  if (!reservation.IsReserved()) {
    V8::FatalProcessOutOfMemory(isolate_,
                                "CodeRange setup: allocate virtual memory");
  }
  code_range_ = reservation.region();
  isolate_->AddCodeRange(code_range_.begin(), code_range_.size());

  // We are sure that we have mapped a block of requested addresses.
  DCHECK_GE(reservation.size(), requested);
  Address base = reservation.address();

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  // https://cs.chromium.org/chromium/src/components/crash/content/
  // app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation.SetPermissions(base, reserved_area,
                                    PageAllocator::kReadWrite))
      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");

    base += reserved_area;
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size =
      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
                MemoryChunk::kPageSize);
  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));

  LOG(isolate_,
      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
               requested));

  code_reservation_ = std::move(reservation);
  code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
      page_allocator, aligned_base, size,
      static_cast<size_t>(MemoryChunk::kAlignment));
  code_page_allocator_ = code_page_allocator_instance_.get();
}

void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // Check that spaces were torn down before MemoryAllocator.
  DCHECK_EQ(size_, 0u);
  // TODO(gc) this will be true again when we fix FreeMemory.
  // DCHECK_EQ(0, size_executable_);
  capacity_ = 0;

  if (last_chunk_.IsReserved()) {
    last_chunk_.Free();
  }

  if (code_page_allocator_instance_.get()) {
    DCHECK(!code_range_.is_empty());
    code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
                                                            code_range_.size());
    code_range_ = base::AddressRegion();
    code_page_allocator_instance_.reset();
  }
  code_page_allocator_ = nullptr;
  data_page_allocator_ = nullptr;
}

class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 public:
  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
      : CancelableTask(isolate),
        unmapper_(unmapper),
        tracer_(isolate->heap()->tracer()) {}

 private:
  void RunInternal() override {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
    unmapper_->active_unmapping_tasks_--;
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
    if (FLAG_trace_unmapper) {
      PrintIsolate(unmapper_->heap_->isolate(),
                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
    }
  }

  Unmapper* const unmapper_;
  GCTracer* const tracer_;
  DISALLOW_COPY_AND_ASSIGN(UnmapFreeMemoryTask);
};

void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
    if (!MakeRoomForNewTasks()) {
      // kMaxUnmapperTasks are already running. Avoid creating any more.
      if (FLAG_trace_unmapper) {
        PrintIsolate(heap_->isolate(),
                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
                     kMaxUnmapperTasks);
      }
      return;
    }
    auto task = std::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
    if (FLAG_trace_unmapper) {
      PrintIsolate(heap_->isolate(),
                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
                   task->id());
    }
    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
    DCHECK_GE(active_unmapping_tasks_, 0);
    active_unmapping_tasks_++;
    task_ids_[pending_unmapping_tasks_++] = task->id();
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  } else {
    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
  }
}

void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
  for (int i = 0; i < pending_unmapping_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_unmapping_tasks_semaphore_.Wait();
    }
  }
  pending_unmapping_tasks_ = 0;
  active_unmapping_tasks_ = 0;

  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
  }
}

void MemoryAllocator::Unmapper::PrepareForGC() {
  // Free non-regular chunks because they cannot be re-used.
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  CancelAndWaitForPendingTasks();
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}

bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);

  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
    // All previous unmapping tasks have been run to completion.
    // Finalize those tasks to make room for new ones.
    CancelAndWaitForPendingTasks();
  }
  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}

void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}

template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kReleasePooled we need to free them
    // though.
    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
    }
  }
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::TearDown() {
  CHECK_EQ(0, pending_unmapping_tasks_);
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
}

size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  base::MutexGuard guard(&mutex_);

  size_t sum = 0;
  // kPooled chunks are already uncommited. We only have to account for
  // kRegular and kNonRegular chunks.
  for (auto& chunk : chunks_[kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
}

bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
  Address base = reservation->address();
  size_t size = reservation->size();
  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  return true;
}

bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
  size_t size = reservation->size();
  if (!reservation->SetPermissions(reservation->address(), size,
                                   PageAllocator::kNoAccess)) {
    return false;
  }
  return true;
}

void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
                                 Address base, size_t size) {
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
  if (!reservation.IsReserved()) return kNullAddress;
  Address base = reservation.address();
  size_ += reservation.size();

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = kNullAddress;
    }
  } else {
    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = kNullAddress;
    }
  }

  if (base == kNullAddress) {
    // Failed to commit the body. Free the mapping and any partially committed
    // regions inside it.
    reservation.Free();
    size_ -= reserve_size;
    return kNullAddress;
  }

  *controller = std::move(reservation);
  return base;
}

}  // namespace internal
}  // namespace v8

src/heap/memory-allocator.h (new file, 431 lines)
@@ -0,0 +1,431 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MEMORY_ALLOCATOR_H_
#define V8_HEAP_MEMORY_ALLOCATOR_H_

#include <atomic>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/semaphore.h"
#include "src/heap/heap.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

class Heap;
class Isolate;

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
 public:
  // Returns the most recently freed code range start address for the given
  // size. If there is no such entry, then a random address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  base::Mutex mutex_;
  // A map from code range size to an array of recently freed code range
  // addresses. There should be O(1) different code range sizes.
  // The length of each array is limited by the peak number of code ranges,
  // which should be also O(1).
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};

// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and large
// pages for large object space.
class MemoryAllocator {
 public:
  // Unmapper takes care of concurrently unmapping and uncommitting memory
  // chunks.
  class Unmapper {
   public:
    class UnmapFreeMemoryTask;

    Unmapper(Heap* heap, MemoryAllocator* allocator)
        : heap_(heap),
          allocator_(allocator),
          pending_unmapping_tasks_semaphore_(0),
          pending_unmapping_tasks_(0),
          active_unmapping_tasks_(0) {
      chunks_[kRegular].reserve(kReservedQueueingSlots);
      chunks_[kPooled].reserve(kReservedQueueingSlots);
    }

    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
        AddMemoryChunkSafe<kRegular>(chunk);
      } else {
        AddMemoryChunkSafe<kNonRegular>(chunk);
      }
    }

    MemoryChunk* TryGetPooledMemoryChunkSafe() {
      // Procedure:
      // (1) Try to get a chunk that was declared as pooled and already has
      //     been uncommitted.
      // (2) Try to steal any memory chunk of kPageSize that would've been
      //     unmapped.
      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
      if (chunk == nullptr) {
        chunk = GetMemoryChunkSafe<kRegular>();
        if (chunk != nullptr) {
          // For stolen chunks we need to manually free any allocated memory.
          chunk->ReleaseAllAllocatedMemory();
        }
      }
      return chunk;
    }

    V8_EXPORT_PRIVATE void FreeQueuedChunks();
    void CancelAndWaitForPendingTasks();
    void PrepareForGC();
    V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
    V8_EXPORT_PRIVATE void TearDown();
    size_t NumberOfCommittedChunks();
    V8_EXPORT_PRIVATE int NumberOfChunks();
    size_t CommittedBufferedMemory();

   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                    // can thus be used for stealing.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommited and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      kUncommitPooled,
      kReleasePooled,
    };

    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::MutexGuard guard(&mutex_);
      chunks_[type].push_back(chunk);
    }

    template <ChunkQueueType type>
    MemoryChunk* GetMemoryChunkSafe() {
      base::MutexGuard guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    bool MakeRoomForNewTasks();

    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks();

    void PerformFreeMemoryOnQueuedNonRegularChunks();

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t pending_unmapping_tasks_;
    std::atomic<intptr_t> active_unmapping_tasks_;

    friend class MemoryAllocator;
  };

  enum AllocationMode {
    kRegular,
    kPooled,
  };

  enum FreeMode {
    kFull,
    kAlreadyPooled,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

  V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();

  // Computes the memory area of discardable memory within a given memory area
  // [addr, addr+size) and returns the result as base::AddressRegion. If the
  // memory is not discardable base::AddressRegion is an empty region.
  V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
      Address addr, size_t size);

  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
                                    size_t code_range_size);

  V8_EXPORT_PRIVATE void TearDown();

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
            typename SpaceType>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  template <MemoryAllocator::FreeMode mode = kFull>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Free(MemoryChunk* chunk);

  // Returns allocated spaces in bytes.
  size_t Size() const { return size_; }

  // Returns allocated executable spaces in bytes.
  size_t SizeExecutable() const { return size_executable_; }

  // Returns the maximum available bytes of heaps.
  size_t Available() const {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               Space* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted though, so
  // |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }

  // Commit memory region owned by given reservation object. Returns true if
  // it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  // Uncommit memory region owned by given reservation object. Returns true if
  // it succeeded and false otherwise.
  bool UncommitMemory(VirtualMemory* reservation);

  // Zaps a contiguous block of memory [start..(start+size)[ with
  // a given zap value.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);

  // Page allocator instance for allocating non-executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }

  // Page allocator instance for allocating executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }

  // Returns page allocator suitable for allocating pages with requested
  // executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  // A region of memory that may contain executable code including reserved
  // OS page with read-write access in the beginning.
  const base::AddressRegion& code_range() const {
    // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    DCHECK_IMPLIES(!code_range_.is_empty(),
                   code_range_.contains(code_page_allocator_instance_->begin(),
                                        code_page_allocator_instance_->size()));
    return code_range_;
  }

  Unmapper* unmapper() { return &unmapper_; }

  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemory(MemoryChunk* chunk);

 private:
  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                   size_t requested);

  // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
  // logs a delete event and adds the chunk to remembered unmapped pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFree was executed
  // before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only support
  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
                              ptr, low, std::memory_order_acq_rel)) {
    }
    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
                               ptr, high, std::memory_order_acq_rel)) {
    }
  }

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }

  Isolate* isolate_;

  // This object controls virtual space reserved for code on the V8 heap. This
  // is only valid for 64-bit architectures where kRequiresCodeRange.
  VirtualMemory code_reservation_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled or
  // on those 64-bit architectures where pc-relative 32-bit displacement
  // can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // A part of the |code_reservation_| that may contain executable code
  // including reserved page with read-write access in the beginning.
  // See details below.
  base::AddressRegion code_range_;

  // This unique pointer owns the instance of bounded code allocator
  // that controls executable pages allocation. It does not control the
  // optionally existing page in the beginning of the |code_range_|.
  // So, summarizing all above, the following conditions hold:
  // 1) |code_reservation_| >= |code_range_|
  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
  // 3) |code_reservation_| is AllocatePageSize()-aligned
  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
  // 5) |code_range_| is CommitPageSize()-aligned
  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  // Data structure to remember allocated executable memory chunks.
  std::unordered_set<MemoryChunk*> executable_memory_;

  friend class heap::TestCodePageAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};

extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MEMORY_ALLOCATOR_H_

@@ -7,6 +7,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/spaces.h"
#include "src/objects/heap-object.h"

@@ -9,6 +9,7 @@
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/mark-compact.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/paged-spaces.h"
#include "src/heap/spaces-inl.h"

@@ -10,6 +10,7 @@
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/paged-spaces-inl.h"
#include "src/heap/read-only-heap.h"

@@ -13,6 +13,7 @@
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

@@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/read-only-heap.h"
#include "src/objects/objects-inl.h"

@@ -18,6 +18,7 @@
namespace v8 {
namespace internal {

class MemoryAllocator;
class ReadOnlyHeap;

class ReadOnlyPage : public Page {

@@ -6,7 +6,6 @@
#define V8_HEAP_SPACES_INL_H_

#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/v8-fallthrough.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"

@@ -9,6 +9,7 @@
#include <utility>

#include "src/base/bits.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/semaphore.h"

@@ -64,361 +65,6 @@ PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  }
}

void Page::AllocateFreeListCategories() {
  DCHECK_NULL(categories_);
  categories_ =

@@ -8,29 +8,19 @@
#include <atomic>
#include <memory>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#include "src/base/atomic-utils.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/export-template.h"
#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/list.h"
#include "src/heap/marking.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/objects/free-space.h"
#include "src/objects/heap-object.h"
#include "src/objects/map.h"
#include "src/objects/objects.h"
#include "src/tasks/cancelable-task.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

@@ -657,403 +647,6 @@ STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
|
||||
// The process-wide singleton that keeps track of code range regions with the
|
||||
// intention to reuse free code range regions as a workaround for CFG memory
|
||||
// leaks (see crbug.com/870054).
|
||||
class CodeRangeAddressHint {
|
||||
public:
|
||||
// Returns the most recently freed code range start address for the given
|
||||
// size. If there is no such entry, then a random address is returned.
|
||||
V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
|
||||
|
||||
V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
|
||||
size_t code_range_size);
|
||||
|
||||
private:
|
||||
base::Mutex mutex_;
|
||||
// A map from code range size to an array of recently freed code range
|
||||
// addresses. There should be O(1) different code range sizes.
|
||||
// The length of each array is limited by the peak number of code ranges,
|
||||
// which should be also O(1).
|
||||
std::unordered_map<size_t, std::vector<Address>> recently_freed_;
|
||||
};
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
// A space acquires chunks of memory from the operating system. The memory
|
||||
// allocator allocates and deallocates pages for the paged heap spaces and large
|
||||
// pages for large object space.
|
||||
class MemoryAllocator {
|
||||
public:
|
||||
// Unmapper takes care of concurrently unmapping and uncommitting memory
|
||||
// chunks.
|
||||
class Unmapper {
|
||||
public:
|
||||
class UnmapFreeMemoryTask;
|
||||
|
||||
Unmapper(Heap* heap, MemoryAllocator* allocator)
|
||||
: heap_(heap),
|
||||
allocator_(allocator),
|
||||
pending_unmapping_tasks_semaphore_(0),
|
||||
pending_unmapping_tasks_(0),
|
||||
active_unmapping_tasks_(0) {
|
||||
chunks_[kRegular].reserve(kReservedQueueingSlots);
|
||||
chunks_[kPooled].reserve(kReservedQueueingSlots);
|
||||
}
|
||||
|
||||
void AddMemoryChunkSafe(MemoryChunk* chunk) {
|
||||
if (!chunk->IsLargePage() && chunk->executable() != EXECUTABLE) {
|
||||
AddMemoryChunkSafe<kRegular>(chunk);
|
||||
} else {
|
||||
AddMemoryChunkSafe<kNonRegular>(chunk);
|
||||
}
|
||||
}
|
||||
|
||||
MemoryChunk* TryGetPooledMemoryChunkSafe() {
|
||||
// Procedure:
|
||||
// (1) Try to get a chunk that was declared as pooled and already has
|
||||
// been uncommitted.
|
||||
// (2) Try to steal any memory chunk of kPageSize that would've been
|
||||
// unmapped.
|
||||
MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
|
||||
if (chunk == nullptr) {
|
||||
chunk = GetMemoryChunkSafe<kRegular>();
|
||||
if (chunk != nullptr) {
|
||||
// For stolen chunks we need to manually free any allocated memory.
|
||||
chunk->ReleaseAllAllocatedMemory();
|
||||
}
|
||||
}
|
||||
return chunk;
|
||||
}
|
||||
|
||||
V8_EXPORT_PRIVATE void FreeQueuedChunks();
|
||||
void CancelAndWaitForPendingTasks();
|
||||
void PrepareForGC();
|
||||
V8_EXPORT_PRIVATE void EnsureUnmappingCompleted();
|
||||
V8_EXPORT_PRIVATE void TearDown();
|
||||
size_t NumberOfCommittedChunks();
|
||||
V8_EXPORT_PRIVATE int NumberOfChunks();
|
||||
size_t CommittedBufferedMemory();
|
||||
|
||||
   private:
    static const int kReservedQueueingSlots = 64;
    static const int kMaxUnmapperTasks = 4;

    enum ChunkQueueType {
      kRegular,     // Pages of kPageSize that do not live in a CodeRange and
                    // can thus be used for stealing.
      kNonRegular,  // Large chunks and executable chunks.
      kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
      kNumberOfChunkQueues,
    };

    enum class FreeMode {
      kUncommitPooled,
      kReleasePooled,
    };

    template <ChunkQueueType type>
    void AddMemoryChunkSafe(MemoryChunk* chunk) {
      base::MutexGuard guard(&mutex_);
      chunks_[type].push_back(chunk);
    }

    template <ChunkQueueType type>
    MemoryChunk* GetMemoryChunkSafe() {
      base::MutexGuard guard(&mutex_);
      if (chunks_[type].empty()) return nullptr;
      MemoryChunk* chunk = chunks_[type].back();
      chunks_[type].pop_back();
      return chunk;
    }

    bool MakeRoomForNewTasks();

    template <FreeMode mode>
    void PerformFreeMemoryOnQueuedChunks();

    void PerformFreeMemoryOnQueuedNonRegularChunks();

    Heap* const heap_;
    MemoryAllocator* const allocator_;
    base::Mutex mutex_;
    std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
    CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
    base::Semaphore pending_unmapping_tasks_semaphore_;
    intptr_t pending_unmapping_tasks_;
    std::atomic<intptr_t> active_unmapping_tasks_;

    friend class MemoryAllocator;
  };

  enum AllocationMode {
    kRegular,
    kPooled,
  };

  enum FreeMode {
    kFull,
    kAlreadyPooled,
    kPreFreeAndQueue,
    kPooledAndQueue,
  };

  V8_EXPORT_PRIVATE static intptr_t GetCommitPageSize();

  // Computes the memory area of discardable memory within a given memory area
  // [addr, addr+size) and returns the result as base::AddressRegion. If the
  // memory is not discardable base::AddressRegion is an empty region.
  V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
      Address addr, size_t size);

  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
                                    size_t code_range_size);

  V8_EXPORT_PRIVATE void TearDown();

  // Allocates a Page from the allocator. AllocationMode is used to indicate
  // whether pooled allocation, which only works for MemoryChunk::kPageSize,
  // should be tried first.
  template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
            typename SpaceType>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);

  LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
                               Executability executable);

  template <MemoryAllocator::FreeMode mode = kFull>
  EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
  void Free(MemoryChunk* chunk);

  // Returns allocated spaces in bytes.
  size_t Size() const { return size_; }

  // Returns allocated executable spaces in bytes.
  size_t SizeExecutable() const { return size_executable_; }

  // Returns the maximum available bytes of heaps.
  size_t Available() const {
    const size_t size = Size();
    return capacity_ < size ? 0 : capacity_ - size;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(Address address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed, it
  // could be committed later by calling MemoryChunk::CommitArea.
  V8_EXPORT_PRIVATE MemoryChunk* AllocateChunk(size_t reserve_area_size,
                                               size_t commit_area_size,
                                               Executability executable,
                                               Space* space);

  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
                                size_t alignment, Executability executable,
                                void* hint, VirtualMemory* controller);

  void FreeMemory(v8::PageAllocator* page_allocator, Address addr, size_t size);

  // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
  // internally memory is freed from |start_free| to the end of the reservation.
  // Additional memory beyond the page is not accounted though, so
  // |bytes_to_free| is computed by the caller.
  void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                         size_t bytes_to_free, Address new_area_end);

  // Checks if an allocated MemoryChunk was intended to be used for executable
  // memory.
  bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
    return executable_memory_.find(chunk) != executable_memory_.end();
  }

  // Commit memory region owned by given reservation object. Returns true if
  // it succeeded and false otherwise.
  bool CommitMemory(VirtualMemory* reservation);

  // Uncommit memory region owned by given reservation object. Returns true if
  // it succeeded and false otherwise.
  bool UncommitMemory(VirtualMemory* reservation);

  // Zaps a contiguous block of memory [start..(start+size)[ with
  // a given zap value.
  void ZapBlock(Address start, size_t size, uintptr_t zap_value);

  V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
                                                    Address start,
                                                    size_t commit_size,
                                                    size_t reserved_size);

  // Page allocator instance for allocating non-executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* data_page_allocator() { return data_page_allocator_; }

  // Page allocator instance for allocating executable pages.
  // Guaranteed to be a valid pointer.
  v8::PageAllocator* code_page_allocator() { return code_page_allocator_; }

  // Returns page allocator suitable for allocating pages with requested
  // executability.
  v8::PageAllocator* page_allocator(Executability executable) {
    return executable == EXECUTABLE ? code_page_allocator_
                                    : data_page_allocator_;
  }

  // A region of memory that may contain executable code including reserved
  // OS page with read-write access in the beginning.
  const base::AddressRegion& code_range() const {
    // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
    DCHECK_IMPLIES(!code_range_.is_empty(),
                   code_range_.contains(code_page_allocator_instance_->begin(),
                                        code_page_allocator_instance_->size()));
    return code_range_;
  }

  Unmapper* unmapper() { return &unmapper_; }

  // Performs all necessary bookkeeping to free the memory, but does not free
  // it.
  void UnregisterMemory(MemoryChunk* chunk);

 private:
  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
                                   size_t requested);

  // PreFreeMemory logically frees the object, i.e., it unregisters the memory,
  // logs a delete event and adds the chunk to remembered unmapped pages.
  void PreFreeMemory(MemoryChunk* chunk);

  // PerformFreeMemory can be called concurrently when PreFree was executed
  // before.
  void PerformFreeMemory(MemoryChunk* chunk);

  // See AllocatePage for public interface. Note that currently we only support
  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
  template <typename SpaceType>
  MemoryChunk* AllocatePagePooled(SpaceType* owner);

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(Address low, Address high) {
    // The use of atomic primitives does not guarantee correctness (wrt.
    // desired semantics) by default. The loop here ensures that we update the
    // values only if they did not change in between.
    Address ptr = lowest_ever_allocated_.load(std::memory_order_relaxed);
    while ((low < ptr) && !lowest_ever_allocated_.compare_exchange_weak(
                              ptr, low, std::memory_order_acq_rel)) {
    }
    ptr = highest_ever_allocated_.load(std::memory_order_relaxed);
    while ((high > ptr) && !highest_ever_allocated_.compare_exchange_weak(
                               ptr, high, std::memory_order_acq_rel)) {
    }
  }

  void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
    DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.insert(chunk);
  }

  void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
    DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
    executable_memory_.erase(chunk);
    chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
  }

  Isolate* isolate_;

  // This object controls virtual space reserved for code on the V8 heap. This
  // is only valid for 64-bit architectures where kRequiresCodeRange.
  VirtualMemory code_reservation_;

  // Page allocator used for allocating data pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled).
  v8::PageAllocator* data_page_allocator_;

  // Page allocator used for allocating code pages. Depending on the
  // configuration it may be a page allocator instance provided by v8::Platform
  // or a BoundedPageAllocator (when pointer compression is enabled or
  // on those 64-bit architectures where pc-relative 32-bit displacement
  // can be used for call and jump instructions).
  v8::PageAllocator* code_page_allocator_;

  // A part of the |code_reservation_| that may contain executable code
  // including reserved page with read-write access in the beginning.
  // See details below.
  base::AddressRegion code_range_;

  // This unique pointer owns the instance of bounded code allocator
  // that controls executable pages allocation. It does not control the
  // optionally existing page in the beginning of the |code_range_|.
  // So, summarizing all above, the following conditions hold:
  // 1) |code_reservation_| >= |code_range_|
  // 2) |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|.
  // 3) |code_reservation_| is AllocatePageSize()-aligned
  // 4) |code_page_allocator_instance_| is MemoryChunk::kAlignment-aligned
  // 5) |code_range_| is CommitPageSize()-aligned
  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;

  // Maximum space size in bytes.
  size_t capacity_;

  // Allocated space size in bytes.
  std::atomic<size_t> size_;
  // Allocated executable space size in bytes.
  std::atomic<size_t> size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  std::atomic<Address> lowest_ever_allocated_;
  std::atomic<Address> highest_ever_allocated_;

  VirtualMemory last_chunk_;
  Unmapper unmapper_;

  // Data structure to remember allocated executable memory chunks.
  std::unordered_set<MemoryChunk*> executable_memory_;

  friend class heap::TestCodePageAllocatorScope;

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};
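
// Illustrative sketch, not part of the original header: a minimal
// allocate/free cycle as a paged space would drive it, assuming `allocator`
// and `space` are the live MemoryAllocator and PagedSpace of a heap and
// `area_size` is the usable object-area size the space requests.
inline void AllocateAndReleasePageSketch(MemoryAllocator* allocator,
                                         PagedSpace* space, size_t area_size) {
  // Regular (non-pooled) allocation of a non-executable page for the space.
  Page* page = allocator->AllocatePage<MemoryAllocator::kRegular>(
      area_size, space, NOT_EXECUTABLE);
  if (page == nullptr) return;  // Allocation can fail under memory pressure.
  // ... the space links the page in and updates its accounting ...
  // kFull releases the memory immediately; the queued modes instead hand the
  // chunk to the Unmapper for concurrent unmapping.
  allocator->Free<MemoryAllocator::kFull>(page);
}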

extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
        size_t size, PagedSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);
extern template EXPORT_TEMPLATE_DECLARE(V8_EXPORT_PRIVATE)
    Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
        size_t size, SemiSpace* owner, Executability executable);

extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kFull>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPreFreeAndQueue>(MemoryChunk* chunk);
extern template EXPORT_TEMPLATE_DECLARE(
    V8_EXPORT_PRIVATE) void MemoryAllocator::
    Free<MemoryAllocator::kPooledAndQueue>(MemoryChunk* chunk);
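
// Illustrative sketch, not part of the original header: the extern template
// declarations above suppress implicit instantiation in other translation
// units; the matching explicit instantiation would typically appear once in
// the .cc file via EXPORT_TEMPLATE_DEFINE, roughly as follows (shown here for
// the kRegular/PagedSpace case only).
//
//   template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
//       Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular,
//                                           PagedSpace>(
//           size_t size, PagedSpace* owner, Executability executable);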

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.

@ -10,7 +10,7 @@
#include "src/execution/frames-inl.h"
#include "src/execution/simulator.h"
#include "src/execution/vm-state-inl.h"
#include "src/heap/heap-inl.h" // For MemoryAllocator::code_range.
#include "src/heap/heap-inl.h" // For Heap::code_range.
#include "src/logging/counters.h"
#include "src/sanitizer/asan.h"
#include "src/sanitizer/msan.h"

@ -31,6 +31,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/factory.h"
#include "src/heap/large-spaces.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"

@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/init/v8.h"
#include <vector>

#include "src/heap/spaces.h"
#include "src/heap/heap.h"
#include "src/heap/memory-allocator.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-utils.h"

@ -8,6 +8,7 @@
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/heap/factory.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces.h"
#include "src/libsampler/sampler.h"
#include "test/cctest/cctest.h"

@ -7,6 +7,7 @@
#include "src/base/region-allocator.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/memory-allocator.h"
#include "src/heap/spaces-inl.h"
#include "src/utils/ostreams.h"
#include "test/unittests/test-utils.h"