[ptr-cage] Factor CodeRange out of MemoryAllocator and share along with ptr cage
This CL factors out a CodeRange class out of MemoryAllocator. When
V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a single CodeRange
shared by all Isolates in the process. This also turns short builtins back on
for both configurations of pointer compression. When sharing a cage, there is
a single copy of the re-embedded builtins.

Since a shared pointer cage is still experimental, to avoid API churn this
CodeRange's size is not configurable and is always the maximal size depending
on the underlying platform.

Change-Id: Ie94f52746f2c5450247a999cc6071e3914d4cf0c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2819206
Reviewed-by: Adam Klein <adamk@chromium.org>
Reviewed-by: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74261}
Parent: 51c55292db
Commit: 0b5ec843cc

BUILD.gn (9 changed lines)
@@ -413,10 +413,9 @@ if (v8_multi_arch_build &&
   v8_enable_pointer_compression_shared_cage = v8_enable_pointer_compression
 }
 if (v8_enable_short_builtin_calls &&
-    (!v8_enable_pointer_compression ||
-     v8_enable_pointer_compression_shared_cage || v8_control_flow_integrity)) {
-  # Disable short calls when pointer compression in a per-Isolate cage is not
-  # enabled. Or when CFI is enabled (until the CFI-related issues are fixed).
+    (!v8_enable_pointer_compression || v8_control_flow_integrity)) {
+  # Disable short calls when pointer compression is not enabled.
+  # Or when CFI is enabled (until the CFI-related issues are fixed).
   v8_enable_short_builtin_calls = false
 }
 if (v8_enable_shared_ro_heap == "") {
@@ -2617,6 +2616,7 @@ v8_header_set("v8_internal_headers") {
     "src/heap/base-space.h",
     "src/heap/basic-memory-chunk.h",
     "src/heap/code-object-registry.h",
+    "src/heap/code-range.h",
     "src/heap/code-stats.h",
     "src/heap/collection-barrier.h",
     "src/heap/combined-heap.h",
@@ -3741,6 +3741,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/heap/base-space.cc",
     "src/heap/basic-memory-chunk.cc",
     "src/heap/code-object-registry.cc",
+    "src/heap/code-range.cc",
     "src/heap/code-stats.cc",
     "src/heap/collection-barrier.cc",
     "src/heap/combined-heap.cc",
@@ -7136,6 +7136,11 @@ class V8_EXPORT ResourceConstraints {
   /**
    * The amount of virtual memory reserved for generated code. This is relevant
    * for 64-bit architectures that rely on code range for calls in code.
+   *
+   * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
+   * process-wide code range that is lazily initialized. This value is used to
+   * configure that shared code range when the first Isolate is
+   * created. Subsequent Isolates ignore this value.
    */
   size_t code_range_size_in_bytes() const { return code_range_size_; }
   void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
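For embedders, the documentation above maps onto v8::Isolate::CreateParams. A minimal sketch of requesting a specific code range size; the value and the surrounding platform setup are illustrative, not part of this CL:

```cpp
#include <v8.h>

// Sketch: ask for a 256 MB code range. Under a shared pointer-compression
// cage, only the Isolate that ends up initializing the process-wide
// CodeRange honors this value; later Isolates ignore it.
v8::Isolate* NewIsolateWithCodeRange(v8::ArrayBuffer::Allocator* allocator) {
  v8::Isolate::CreateParams params;
  params.array_buffer_allocator = allocator;
  params.constraints.set_code_range_size_in_bytes(256 * 1024 * 1024);
  return v8::Isolate::New(params);
}
```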
src/DEPS (1 changed line)

@@ -15,6 +15,7 @@ include_rules = [
   "+src/compiler/wasm-compiler.h",
   "-src/heap",
   "+src/heap/basic-memory-chunk.h",
+  "+src/heap/code-range.h",
   "+src/heap/combined-heap.h",
   "+src/heap/embedder-tracing.h",
   "+src/heap/factory.h",
@@ -8911,10 +8911,9 @@ void Isolate::SetStackLimit(uintptr_t stack_limit) {
 
 void Isolate::GetCodeRange(void** start, size_t* length_in_bytes) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  const base::AddressRegion& code_range =
-      isolate->heap()->memory_allocator()->code_range();
-  *start = reinterpret_cast<void*>(code_range.begin());
-  *length_in_bytes = code_range.size();
+  const base::AddressRegion& code_region = isolate->heap()->code_region();
+  *start = reinterpret_cast<void*>(code_region.begin());
+  *length_in_bytes = code_region.size();
 }
 
 void Isolate::GetEmbeddedCodeRange(const void** start,
@@ -53,10 +53,12 @@
 #define V8_BASE_ONCE_H_
 
 #include <stddef.h>
 
 #include <atomic>
+#include <functional>
 
 #include "src/base/base-export.h"
+#include "src/base/template-utils.h"
 
 namespace v8 {
 namespace base {
@@ -76,9 +78,9 @@ enum : uint8_t {
 
 using PointerArgFunction = void (*)(void* arg);
 
-template <typename T>
-struct OneArgFunction {
-  using type = void (*)(T);
+template <typename... Args>
+struct FunctionWithArgs {
+  using type = void (*)(Args...);
 };
 
 V8_BASE_EXPORT void CallOnceImpl(OnceType* once,
@@ -90,11 +92,13 @@ inline void CallOnce(OnceType* once, std::function<void()> init_func) {
   }
 }
 
-template <typename Arg>
+template <typename... Args, typename = std::enable_if_t<
+                                conjunction<std::is_scalar<Args>...>::value>>
 inline void CallOnce(OnceType* once,
-                     typename OneArgFunction<Arg*>::type init_func, Arg* arg) {
+                     typename FunctionWithArgs<Args...>::type init_func,
+                     Args... args) {
   if (once->load(std::memory_order_acquire) != ONCE_STATE_DONE) {
-    CallOnceImpl(once, [=]() { init_func(arg); });
+    CallOnceImpl(once, [=]() { init_func(args...); });
   }
 }
 
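For reference, the variadic overload above can be mimicked with the standard library; the sketch below (illustrative names, not V8 code) shows the same idea of forwarding scalar arguments by value into a call-once initializer:

```cpp
#include <cstddef>
#include <cstdio>
#include <mutex>
#include <type_traits>

// Forward scalar arguments by value into a once-initializer, mirroring the
// shape of the new base::CallOnce overload.
template <typename... Args,
          typename = std::enable_if_t<
              std::conjunction<std::is_scalar<Args>...>::value>>
void CallOnceWithArgs(std::once_flag* flag, void (*init)(Args...),
                      Args... args) {
  std::call_once(*flag, [=]() { init(args...); });
}

void InitCodeSpace(void* hint, size_t size) {
  std::printf("reserving %zu bytes near %p\n", size, hint);
}

int main() {
  static std::once_flag flag;
  void* hint = nullptr;
  CallOnceWithArgs(&flag, &InitCodeSpace, hint, size_t{1} << 20);
  CallOnceWithArgs(&flag, &InitCodeSpace, hint, size_t{2} << 20);  // No-op.
}
```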
@@ -98,6 +98,15 @@ struct make_void {
 template <class... Ts>
 using void_t = typename make_void<Ts...>::type;
 
+// Corresponds to C++17's std::conjunction
+template <class...>
+struct conjunction : std::true_type {};
+template <class B>
+struct conjunction<B> : B {};
+template <class B, class... Bn>
+struct conjunction<B, Bn...>
+    : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};
+
 }  // namespace base
 }  // namespace v8
 
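A quick standalone illustration of the helper added above; it behaves like C++17's std::conjunction, which is what the new CallOnce overload uses to constrain its arguments (the stand-in below mirrors the template for demonstration only):

```cpp
#include <type_traits>

template <class...>
struct conjunction : std::true_type {};
template <class B>
struct conjunction<B> : B {};
template <class B, class... Bn>
struct conjunction<B, Bn...>
    : std::conditional_t<bool(B::value), conjunction<Bn...>, B> {};

static_assert(conjunction<>::value, "empty pack is true");
static_assert(conjunction<std::is_scalar<int>, std::is_scalar<char*>>::value,
              "all scalar");
static_assert(!conjunction<std::is_scalar<int>, std::is_scalar<int[4]>>::value,
              "arrays are not scalar");

int main() {}
```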
@@ -42,10 +42,10 @@ AssemblerOptions BuiltinAssemblerOptions(Isolate* isolate,
     return options;
   }
 
-  const base::AddressRegion& code_range = isolate->heap()->code_range();
+  const base::AddressRegion& code_region = isolate->heap()->code_region();
   bool pc_relative_calls_fit_in_code_range =
-      !code_range.is_empty() &&
-      std::ceil(static_cast<float>(code_range.size() / MB)) <=
+      !code_region.is_empty() &&
+      std::ceil(static_cast<float>(code_region.size() / MB)) <=
           kMaxPCRelativeCodeRangeInMB;
 
   options.isolate_independent_code = true;
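The predicate above only asks whether the whole code region fits within the architecture's PC-relative call reach. A standalone sketch of the same arithmetic, with an assumed 128 MB reach standing in for the per-architecture kMaxPCRelativeCodeRangeInMB constant:

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>

constexpr size_t kMB = 1024 * 1024;
// Assumption for illustration only; the real limit is per-architecture.
constexpr size_t kAssumedMaxPCRelativeCodeRangeInMB = 128;

bool PcRelativeCallsFit(size_t code_region_size_in_bytes) {
  if (code_region_size_in_bytes == 0) return false;  // empty region
  return std::ceil(static_cast<float>(code_region_size_in_bytes / kMB)) <=
         kAssumedMaxPCRelativeCodeRangeInMB;
}

int main() {
  std::printf("%d\n", PcRelativeCallsFit(128 * kMB));  // 1: near calls fit
  std::printf("%d\n", PcRelativeCallsFit(256 * kMB));  // 0: too far apart
}
```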
@@ -69,7 +69,7 @@ AssemblerOptions AssemblerOptions::Default(Isolate* isolate) {
 #endif
   options.inline_offheap_trampolines &= !generating_embedded_builtin;
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
-  const base::AddressRegion& code_range = isolate->heap()->code_range();
+  const base::AddressRegion& code_range = isolate->heap()->code_region();
   DCHECK_IMPLIES(code_range.begin() != kNullAddress, !code_range.is_empty());
   options.code_range_start = code_range.begin();
 #endif
@@ -101,11 +101,10 @@ STATIC_ASSERT(V8_DEFAULT_STACK_SIZE_KB* KB +
 
 // Determine whether the short builtin calls optimization is enabled.
 #ifdef V8_SHORT_BUILTIN_CALLS
-#ifndef V8_COMPRESS_POINTERS_IN_ISOLATE_CAGE
+#ifndef V8_COMPRESS_POINTERS
 // TODO(11527): Fix this by passing Isolate* to Code::OffHeapInstructionStart()
 // and friends.
-#error Short builtin calls feature require pointer compression with per- \
-    Isolate cage
+#error Short builtin calls feature requires pointer compression
 #endif
 #endif
 
@@ -3053,9 +3053,8 @@ void Isolate::Deinit() {
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange() &&
       heap()->memory_allocator() && RequiresCodeRange()) {
-    const base::AddressRegion& code_range =
-        heap()->memory_allocator()->code_range();
-    void* start = reinterpret_cast<void*>(code_range.begin());
+    const base::AddressRegion& code_region = heap()->code_region();
+    void* start = reinterpret_cast<void*>(code_region.begin());
     win64_unwindinfo::UnregisterNonABICompliantCodeRange(start);
   }
 #endif  // V8_OS_WIN64
@@ -3427,8 +3426,9 @@ void Isolate::MaybeRemapEmbeddedBuiltinsIntoCodeRange() {
   CHECK_NOT_NULL(embedded_blob_code_);
   CHECK_NE(embedded_blob_code_size_, 0);
 
-  embedded_blob_code_ = heap_.RemapEmbeddedBuiltinsIntoCodeRange(
-      embedded_blob_code_, embedded_blob_code_size_);
+  DCHECK_NOT_NULL(heap_.code_range_);
+  embedded_blob_code_ = heap_.code_range_->RemapEmbeddedBuiltins(
+      this, embedded_blob_code_, embedded_blob_code_size_);
   CHECK_NOT_NULL(embedded_blob_code_);
   // The un-embedded code blob is already a part of the registered code range
   // so it's not necessary to register it again.
@@ -3600,10 +3600,20 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
   heap_.SetUpSpaces();
 
   if (V8_SHORT_BUILTIN_CALLS_BOOL && FLAG_short_builtin_calls) {
-    // Check if the system has more than 4GB of physical memory by comaring
-    // the old space size with respective threshod value.
-    is_short_builtin_calls_enabled_ =
-        heap_.MaxOldGenerationSize() >= kShortBuiltinCallsOldSpaceSizeThreshold;
+    // Check if the system has more than 4GB of physical memory by comparing the
+    // old space size with respective threshold value.
+    //
+    // Additionally, enable if there is already a process-wide CodeRange that
+    // has re-embedded builtins.
+    is_short_builtin_calls_enabled_ = (heap_.MaxOldGenerationSize() >=
+                                       kShortBuiltinCallsOldSpaceSizeThreshold);
+    if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+      std::shared_ptr<CodeRange> code_range =
+          CodeRange::GetProcessWideCodeRange();
+      if (code_range && code_range->embedded_blob_code_copy() != nullptr) {
+        is_short_builtin_calls_enabled_ = true;
+      }
+    }
   }
 
   // Create LocalIsolate/LocalHeap for the main thread and set state to Running.
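Restated outside of Isolate::Init, the enabling rule added above is: keep the memory-size heuristic, but also opt in whenever another Isolate already populated the shared CodeRange with a builtins copy. A hedged sketch with illustrative names:

```cpp
#include <cstddef>

// Not V8 code: a plain restatement of the decision added above.
bool ShouldEnableShortBuiltinCalls(size_t max_old_generation_size,
                                   size_t old_space_threshold,
                                   bool shared_cage,
                                   bool shared_code_range_has_builtin_copy) {
  bool enabled = max_old_generation_size >= old_space_threshold;
  if (shared_cage && shared_code_range_has_builtin_copy) enabled = true;
  return enabled;
}
```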
@@ -3774,10 +3784,9 @@ bool Isolate::Init(SnapshotData* startup_snapshot_data,
 
 #if defined(V8_OS_WIN64)
   if (win64_unwindinfo::CanRegisterUnwindInfoForNonABICompliantCodeRange()) {
-    const base::AddressRegion& code_range =
-        heap()->memory_allocator()->code_range();
-    void* start = reinterpret_cast<void*>(code_range.begin());
-    size_t size_in_bytes = code_range.size();
+    const base::AddressRegion& code_region = heap()->code_region();
+    void* start = reinterpret_cast<void*>(code_region.begin());
+    size_t size_in_bytes = code_region.size();
     win64_unwindinfo::RegisterNonABICompliantCodeRange(start, size_in_bytes);
   }
 #endif  // V8_OS_WIN64
@@ -1035,6 +1035,15 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
     return isolate_data()->cage_base();
   }
 
+  // When pointer compression is on, the PtrComprCage used by this
+  // Isolate. Otherwise nullptr.
+  VirtualMemoryCage* GetPtrComprCage() {
+    return isolate_allocator_->GetPtrComprCage();
+  }
+  const VirtualMemoryCage* GetPtrComprCage() const {
+    return isolate_allocator_->GetPtrComprCage();
+  }
+
   // Generated code can embed this address to get access to the isolate-specific
   // data (for example, roots, external references, builtins, etc.).
   // The kRootRegister is set to this value.
src/heap/code-range.cc (new file, 165 lines)

@@ -0,0 +1,165 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/code-range.h"

#include "src/base/lazy-instance.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/heap-inl.h"

namespace v8 {
namespace internal {

namespace {

DEFINE_LAZY_LEAKY_OBJECT_GETTER(std::shared_ptr<CodeRange>,
                                GetProcessWideCodeRangeCage)

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CodeRangeAddressHint, GetCodeRangeAddressHint)

void FunctionInStaticBinaryForAddressHint() {}
}  // anonymous namespace

Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  auto it = recently_freed_.find(code_range_size);
  if (it == recently_freed_.end() || it->second.empty()) {
    return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
  }
  Address result = it->second.back();
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

CodeRange::~CodeRange() { Free(); }

bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
                                size_t requested) {
  DCHECK_NE(requested, 0);

  if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }
  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
    // Fullfilling both reserved pages requirement and huge code area
    // alignments is not supported (requires re-implementation).
    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
  }
  DCHECK_IMPLIES(kPlatformRequiresCodeRange,
                 requested <= kMaximalCodeRangeSize);

  VirtualMemoryCage::ReservationParams params;
  params.page_allocator = page_allocator;
  params.reservation_size = requested;
  params.base_alignment =
      VirtualMemoryCage::ReservationParams::kAnyBaseAlignment;
  params.base_bias_size = reserved_area;
  params.page_size = MemoryChunk::kPageSize;
  params.requested_start_hint =
      GetCodeRangeAddressHint()->GetAddressHint(requested);

  if (!VirtualMemoryCage::InitReservation(params)) return false;

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space. See
  //   https://cs.chromium.org/chromium/src/components/crash/content/
  //   app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
  // for details.
  if (reserved_area > 0) {
    if (!reservation()->SetPermissions(reservation()->address(), reserved_area,
                                       PageAllocator::kReadWrite)) {
      return false;
    }
  }

  return true;
}

void CodeRange::Free() {
  if (IsReserved()) {
    GetCodeRangeAddressHint()->NotifyFreedCodeRange(
        reservation()->region().begin(), reservation()->region().size());
    VirtualMemoryCage::Free();
  }
}

uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
                                          const uint8_t* embedded_blob_code,
                                          size_t embedded_blob_code_size) {
  const base::AddressRegion& code_region = reservation()->region();
  CHECK_NE(code_region.begin(), kNullAddress);
  CHECK(!code_region.is_empty());

  if (embedded_blob_code_copy_) {
    DCHECK(code_region.contains(
        reinterpret_cast<Address>(embedded_blob_code_copy_),
        embedded_blob_code_size));
    SLOW_DCHECK(memcmp(embedded_blob_code, embedded_blob_code_copy_,
                       embedded_blob_code_size) == 0);
    return embedded_blob_code_copy_;
  }

  const size_t kAllocatePageSize = page_allocator()->AllocatePageSize();
  size_t allocate_code_size =
      RoundUp(embedded_blob_code_size, kAllocatePageSize);

  // Allocate the re-embedded code blob in the end.
  void* hint = reinterpret_cast<void*>(code_region.end() - allocate_code_size);

  void* embedded_blob_copy = page_allocator()->AllocatePages(
      hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);

  if (!embedded_blob_copy) {
    V8::FatalProcessOutOfMemory(
        isolate, "Can't allocate space for re-embedded builtins");
  }

  size_t code_size =
      RoundUp(embedded_blob_code_size, page_allocator()->CommitPageSize());

  if (!page_allocator()->SetPermissions(embedded_blob_copy, code_size,
                                        PageAllocator::kReadWrite)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "Re-embedded builtins: set permissions");
  }
  memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);

  if (!page_allocator()->SetPermissions(embedded_blob_copy, code_size,
                                        PageAllocator::kReadExecute)) {
    V8::FatalProcessOutOfMemory(isolate,
                                "Re-embedded builtins: set permissions");
  }

  embedded_blob_code_copy_ = reinterpret_cast<uint8_t*>(embedded_blob_copy);
  return embedded_blob_code_copy_;
}

// static
void CodeRange::InitializeProcessWideCodeRangeOnce(
    v8::PageAllocator* page_allocator, size_t requested_size) {
  *GetProcessWideCodeRangeCage() = std::make_shared<CodeRange>();
  if (!GetProcessWideCodeRange()->InitReservation(page_allocator,
                                                  requested_size)) {
    V8::FatalProcessOutOfMemory(
        nullptr, "Failed to reserve virtual memory for CodeRange");
  }
}

// static
std::shared_ptr<CodeRange> CodeRange::GetProcessWideCodeRange() {
  return *GetProcessWideCodeRangeCage();
}

}  // namespace internal
}  // namespace v8
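The CodeRangeAddressHint logic moved into this file is essentially a size-keyed free list guarded by a mutex; a standalone sketch of the idea (simplified types, no V8 dependencies, zero used as the "no hint" value):

```cpp
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <unordered_map>
#include <vector>

using Address = uintptr_t;

// Simplified model: remember where ranges of a given size were freed so a
// later reservation of the same size can try to reuse that spot (this works
// around CFG memory accounting leaks on Windows, per crbug.com/870054).
class AddressHint {
 public:
  Address GetAddressHint(size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    auto it = recently_freed_.find(size);
    if (it == recently_freed_.end() || it->second.empty()) {
      return 0;  // caller falls back to an OS-chosen address
    }
    Address result = it->second.back();
    it->second.pop_back();
    return result;
  }

  void NotifyFreed(Address start, size_t size) {
    std::lock_guard<std::mutex> guard(mutex_);
    recently_freed_[size].push_back(start);
  }

 private:
  std::mutex mutex_;
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
```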
src/heap/code-range.h (new file, 105 lines)

@@ -0,0 +1,105 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CODE_RANGE_H_
#define V8_HEAP_CODE_RANGE_H_

#include <unordered_map>
#include <vector>

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
 public:
  // Returns the most recently freed code range start address for the given
  // size. If there is no such entry, then a random address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  base::Mutex mutex_;
  // A map from code range size to an array of recently freed code range
  // addresses. There should be O(1) different code range sizes.
  // The length of each array is limited by the peak number of code ranges,
  // which should be also O(1).
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};

// A code range is a virtual memory cage that may contain executable code. It
// has the following layout.
//
// +------------+-----+----------------  ~~~  -+
// |     RW     | ... |    ...                 |
// +------------+-----+----------------- ~~~  -+
// ^            ^     ^
// start        base  allocatable base
//
// <------------>     <------------------------>
//   reserved            allocatable region
// <------------------------------------------->
//               code region
//
// The start of the reservation may include reserved page with read-write access
// as required by some platforms (Win64). The cage's page allocator does not
// control the optional reserved page in the beginning of the code region.
//
// The following conditions hold:
// 1) |reservation()->region()| >= |optional RW pages| +
//    |reservation()->page_allocator()|
// 2) |reservation()| is AllocatePageSize()-aligned
// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
//    MemoryChunk::kAlignment-aligned
// 4) |base()| is CommitPageSize()-aligned
class CodeRange final : public VirtualMemoryCage {
 public:
  V8_EXPORT_PRIVATE ~CodeRange();

  uint8_t* embedded_blob_code_copy() const { return embedded_blob_code_copy_; }

  bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);

  void Free();

  // Remap and copy the embedded builtins into this CodeRange. This method is
  // idempotent and only performs the copy once. This property is so that this
  // method can be used uniformly regardless of having a per-Isolate or a shared
  // pointer cage. Returns the address of the copy.
  //
  // The builtins code region will be freed with the code range at tear down.
  //
  // When ENABLE_SLOW_DCHECKS is on, the contents of the embedded_blob_code are
  // compared against the already copied version.
  uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
                                 const uint8_t* embedded_blob_code,
                                 size_t embedded_blob_code_size);

  // Initializes the process-wide code range if RequiresProcessWideCodeRange()
  // is true.
  static void InitializeProcessWideCodeRangeOnce(
      v8::PageAllocator* page_allocator, size_t requested_size);

  // If InitializeProcessWideCodeRangeOnce has been called, returns the
  // initialized CodeRange. Otherwise returns an empty std::shared_ptr.
  static std::shared_ptr<CodeRange> GetProcessWideCodeRange();

 private:
  // Used when short builtin calls are enabled, where embedded builtins are
  // copied into the CodeRange so calls can be nearer.
  uint8_t* embedded_blob_code_copy_ = nullptr;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CODE_RANGE_H_
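To make the layout diagram in the header concrete, a small standalone computation under assumed values (4 KB reserved RW area, 256 KB chunk alignment, 128 MB reservation; the real numbers come from the platform and MemoryChunk):

```cpp
#include <cstdint>
#include <cstdio>

constexpr uint64_t RoundUp(uint64_t x, uint64_t align) {
  return (x + align - 1) & ~(align - 1);
}

int main() {
  // Assumed values for illustration only.
  const uint64_t start = 0x2000'0000'0000;      // reservation start
  const uint64_t reserved_rw = 4096;            // optional Win64 RW page
  const uint64_t chunk_alignment = 256 * 1024;  // MemoryChunk-style alignment
  const uint64_t reservation_size = 128ull << 20;

  const uint64_t base = start + reserved_rw;  // first byte after RW area
  const uint64_t allocatable = RoundUp(base, chunk_alignment);
  std::printf("reserved RW: [%#llx, %#llx)\n", (unsigned long long)start,
              (unsigned long long)base);
  std::printf("allocatable: [%#llx, %#llx)\n", (unsigned long long)allocatable,
              (unsigned long long)(start + reservation_size));
}
```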
@@ -154,9 +154,8 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
   if (is_executable_) {
     DCHECK(IsAligned(code->address(), kCodeAlignment));
     DCHECK_IMPLIES(
-        !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
-            !heap->memory_allocator()->code_range().is_empty(),
-        heap->memory_allocator()->code_range().contains(code->address()));
+        !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+        heap->code_region().contains(code->address()));
   }
 
   constexpr bool kIsNotOffHeapTrampoline = false;
@@ -2064,9 +2063,8 @@ Handle<Code> Factory::CopyCode(Handle<Code> code) {
 #endif
   DCHECK(IsAligned(new_code->address(), kCodeAlignment));
   DCHECK_IMPLIES(
-      !V8_ENABLE_THIRD_PARTY_HEAP_BOOL &&
-          !heap->memory_allocator()->code_range().is_empty(),
-      heap->memory_allocator()->code_range().contains(new_code->address()));
+      !V8_ENABLE_THIRD_PARTY_HEAP_BOOL && !heap->code_region().is_empty(),
+      heap->code_region().contains(new_code->address()));
   return new_code;
 }
 
@@ -166,11 +166,12 @@ size_t Heap::NewSpaceAllocationCounter() {
   return new_space_allocation_counter_ + new_space()->AllocatedSinceLastGC();
 }
 
-inline const base::AddressRegion& Heap::code_range() {
+inline const base::AddressRegion& Heap::code_region() {
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
   return tp_heap_->GetCodeRange();
 #else
-  return memory_allocator_->code_range();
+  static constexpr base::AddressRegion kEmptyRegion;
+  return code_range_ ? code_range_->reservation()->region() : kEmptyRegion;
 #endif
 }
 
@@ -35,6 +35,7 @@
 #include "src/heap/barrier.h"
 #include "src/heap/base/stack.h"
 #include "src/heap/code-object-registry.h"
+#include "src/heap/code-range.h"
 #include "src/heap/code-stats.h"
 #include "src/heap/collection-barrier.h"
 #include "src/heap/combined-heap.h"
@@ -5243,6 +5244,10 @@ HeapObject Heap::AllocateRawWithRetryOrFailSlowPath(
   return HeapObject();
 }
 
+namespace {
+V8_DECLARE_ONCE(initialize_shared_code_range_once);
+}  // namespace
+
 void Heap::SetUp() {
 #ifdef V8_ENABLE_ALLOCATION_TIMEOUT
   allocation_timeout_ = NextAllocationTimeout();
@@ -5264,9 +5269,45 @@
       reinterpret_cast<uintptr_t>(v8::internal::GetRandomMmapAddr()) &
       ~kMmapRegionMask;
 
+  v8::PageAllocator* code_page_allocator;
+  if (isolate_->RequiresCodeRange() || code_range_size_ != 0) {
+    const size_t requested_size =
+        code_range_size_ == 0 ? kMaximalCodeRangeSize : code_range_size_;
+    // When a target requires the code range feature, we put all code objects in
+    // a contiguous range of virtual address space, so that they can call each
+    // other with near calls.
+    if (COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) {
+      // When sharing a pointer cage among Isolates, also share the
+      // CodeRange. isolate_->page_allocator() is the process-wide pointer
+      // compression cage's PageAllocator.
+      base::CallOnce(&initialize_shared_code_range_once,
+                     &CodeRange::InitializeProcessWideCodeRangeOnce,
+                     isolate_->page_allocator(), requested_size);
+      code_range_ = CodeRange::GetProcessWideCodeRange();
+    } else {
+      code_range_ = std::make_shared<CodeRange>();
+      if (!code_range_->InitReservation(isolate_->page_allocator(),
+                                        requested_size)) {
+        V8::FatalProcessOutOfMemory(
+            isolate_, "Failed to reserve virtual memory for CodeRange");
+      }
+    }
+
+    LOG(isolate_,
+        NewEvent("CodeRange",
+                 reinterpret_cast<void*>(code_range_->reservation()->address()),
+                 code_range_size_));
+
+    isolate_->AddCodeRange(code_range_->reservation()->region().begin(),
+                           code_range_->reservation()->region().size());
+    code_page_allocator = code_range_->page_allocator();
+  } else {
+    code_page_allocator = isolate_->page_allocator();
+  }
+
   // Set up memory allocator.
   memory_allocator_.reset(
-      new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
+      new MemoryAllocator(isolate_, code_page_allocator, MaxReserved()));
 
   mark_compact_collector_.reset(new MarkCompactCollector(this));
 
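The branch above amounts to "initialize one process-wide range exactly once and share it; otherwise reserve a private one". A standalone sketch of that ownership pattern using std::call_once and std::shared_ptr (illustrative names, not the V8 implementation):

```cpp
#include <cstddef>
#include <memory>
#include <mutex>

struct CodeRangeLike {
  explicit CodeRangeLike(size_t size) : size(size) {}
  size_t size;
};

std::shared_ptr<CodeRangeLike>* ProcessWideSlot() {
  // Intentionally leaked, mirroring a lazy leaky process-wide singleton.
  static auto* slot = new std::shared_ptr<CodeRangeLike>();
  return slot;
}

std::shared_ptr<CodeRangeLike> GetCodeRange(bool shared_cage, size_t size) {
  if (shared_cage) {
    static std::once_flag once;
    std::call_once(once, [&] {
      *ProcessWideSlot() = std::make_shared<CodeRangeLike>(size);
    });
    return *ProcessWideSlot();  // every heap shares the same reservation
  }
  return std::make_shared<CodeRangeLike>(size);  // per-heap reservation
}

int main() {
  auto a = GetCodeRange(/*shared_cage=*/true, 128u << 20);
  auto b = GetCodeRange(/*shared_cage=*/true, 64u << 20);  // size ignored
  return a == b ? 0 : 1;  // same object when sharing
}
```

The shared_ptr ownership matches the member declared in heap.h below: each Heap holds a reference, and the shared configuration simply hands every Heap the same instance.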
@@ -5308,49 +5349,6 @@ void Heap::ReplaceReadOnlySpace(SharedReadOnlySpace* space) {
   read_only_space_ = space;
 }
 
-uint8_t* Heap::RemapEmbeddedBuiltinsIntoCodeRange(
-    const uint8_t* embedded_blob_code, size_t embedded_blob_code_size) {
-  const base::AddressRegion& code_range = memory_allocator()->code_range();
-
-  CHECK_NE(code_range.begin(), kNullAddress);
-  CHECK(!code_range.is_empty());
-
-  v8::PageAllocator* code_page_allocator =
-      memory_allocator()->code_page_allocator();
-
-  const size_t kAllocatePageSize = code_page_allocator->AllocatePageSize();
-  size_t allocate_code_size =
-      RoundUp(embedded_blob_code_size, kAllocatePageSize);
-
-  // Allocate the re-embedded code blob in the end.
-  void* hint = reinterpret_cast<void*>(code_range.end() - allocate_code_size);
-
-  void* embedded_blob_copy = code_page_allocator->AllocatePages(
-      hint, allocate_code_size, kAllocatePageSize, PageAllocator::kNoAccess);
-
-  if (!embedded_blob_copy) {
-    V8::FatalProcessOutOfMemory(
-        isolate(), "Can't allocate space for re-embedded builtins");
-  }
-
-  size_t code_size =
-      RoundUp(embedded_blob_code_size, code_page_allocator->CommitPageSize());
-
-  if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
-                                           PageAllocator::kReadWrite)) {
-    V8::FatalProcessOutOfMemory(isolate(),
-                                "Re-embedded builtins: set permissions");
-  }
-  memcpy(embedded_blob_copy, embedded_blob_code, embedded_blob_code_size);
-
-  if (!code_page_allocator->SetPermissions(embedded_blob_copy, code_size,
-                                           PageAllocator::kReadExecute)) {
-    V8::FatalProcessOutOfMemory(isolate(),
-                                "Re-embedded builtins: set permissions");
-  }
-  return reinterpret_cast<uint8_t*>(embedded_blob_copy);
-}
-
 class StressConcurrentAllocationObserver : public AllocationObserver {
  public:
   explicit StressConcurrentAllocationObserver(Heap* heap)
@@ -66,6 +66,7 @@ class ArrayBufferCollector;
 class ArrayBufferSweeper;
 class BasicMemoryChunk;
 class CodeLargeObjectSpace;
+class CodeRange;
 class CollectionBarrier;
 class ConcurrentAllocator;
 class ConcurrentMarking;
@@ -827,12 +828,6 @@
   // Create ObjectStats if live_object_stats_ or dead_object_stats_ are nullptr.
   void CreateObjectStats();
 
-  // If the code range exists, allocates executable pages in the code range and
-  // copies the embedded builtins code blob there. Returns address of the copy.
-  // The builtins code region will be freed with the code range at tear down.
-  uint8_t* RemapEmbeddedBuiltinsIntoCodeRange(const uint8_t* embedded_blob_code,
-                                              size_t embedded_blob_code_size);
-
   // Sets the TearDown state, so no new GC tasks get posted.
   void StartTearDown();
 
@@ -892,7 +887,7 @@
     return array_buffer_sweeper_.get();
   }
 
-  const base::AddressRegion& code_range();
+  const base::AddressRegion& code_region();
 
   LocalHeap* main_thread_local_heap() { return main_thread_local_heap_; }
 
@@ -2305,6 +2300,13 @@
   std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
   std::unique_ptr<MarkingBarrier> marking_barrier_;
 
+  // This object controls virtual space reserved for code on the V8 heap. This
+  // is only valid for 64-bit architectures where kRequiresCodeRange.
+  //
+  // Owned by the heap when !V8_COMPRESS_POINTERS_IN_SHARED_CAGE, otherwise is
+  // process-wide.
+  std::shared_ptr<CodeRange> code_range_;
+
   // The embedder owns the C++ heap.
   v8::CppHeap* cpp_heap_ = nullptr;
 
@@ -20,118 +20,23 @@
 namespace v8 {
 namespace internal {
 
-static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
-    LAZY_INSTANCE_INITIALIZER;
-
-namespace {
-void FunctionInStaticBinaryForAddressHint() {}
-}  // namespace
-
-Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
-  base::MutexGuard guard(&mutex_);
-  auto it = recently_freed_.find(code_range_size);
-  if (it == recently_freed_.end() || it->second.empty()) {
-    return FUNCTION_ADDR(&FunctionInStaticBinaryForAddressHint);
-  }
-  Address result = it->second.back();
-  it->second.pop_back();
-  return result;
-}
-
-void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
-                                                size_t code_range_size) {
-  base::MutexGuard guard(&mutex_);
-  recently_freed_[code_range_size].push_back(code_range_start);
-}
-
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
 
-MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
-                                 size_t code_range_size)
+MemoryAllocator::MemoryAllocator(Isolate* isolate,
+                                 v8::PageAllocator* code_page_allocator,
+                                 size_t capacity)
     : isolate_(isolate),
       data_page_allocator_(isolate->page_allocator()),
-      code_page_allocator_(nullptr),
+      code_page_allocator_(code_page_allocator),
       capacity_(RoundUp(capacity, Page::kPageSize)),
       size_(0),
       size_executable_(0),
       lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
-  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
 }
 
-void MemoryAllocator::InitializeCodePageAllocator(
-    v8::PageAllocator* page_allocator, size_t requested) {
-  DCHECK_NULL(code_page_allocator_instance_.get());
-
-  code_page_allocator_ = page_allocator;
-
-  if (requested == 0) {
-    if (!isolate_->RequiresCodeRange()) return;
-    // When a target requires the code range feature, we put all code objects
-    // in a kMaximalCodeRangeSize range of virtual address space, so that
-    // they can call each other with near calls.
-    requested = kMaximalCodeRangeSize;
-  } else if (requested <= kMinimumCodeRangeSize) {
-    requested = kMinimumCodeRangeSize;
-  }
-
-  const size_t reserved_area =
-      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
-  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
-    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
-    // Fullfilling both reserved pages requirement and huge code area
-    // alignments is not supported (requires re-implementation).
-    DCHECK_LE(kMinExpectedOSPageSize, page_allocator->AllocatePageSize());
-  }
-  DCHECK(!isolate_->RequiresCodeRange() || requested <= kMaximalCodeRangeSize);
-
-  Address hint =
-      RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
-                page_allocator->AllocatePageSize());
-  VirtualMemory reservation(
-      page_allocator, requested, reinterpret_cast<void*>(hint),
-      std::max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
-  if (!reservation.IsReserved()) {
-    V8::FatalProcessOutOfMemory(isolate_,
-                                "CodeRange setup: allocate virtual memory");
-  }
-  code_range_ = reservation.region();
-  isolate_->AddCodeRange(code_range_.begin(), code_range_.size());
-
-  // We are sure that we have mapped a block of requested addresses.
-  DCHECK_GE(reservation.size(), requested);
-  Address base = reservation.address();
-
-  // On some platforms, specifically Win64, we need to reserve some pages at
-  // the beginning of an executable space. See
-  //   https://cs.chromium.org/chromium/src/components/crash/content/
-  //   app/crashpad_win.cc?rcl=fd680447881449fba2edcf0589320e7253719212&l=204
-  // for details.
-  if (reserved_area > 0) {
-    if (!reservation.SetPermissions(base, reserved_area,
-                                    PageAllocator::kReadWrite))
-      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");
-
-    base += reserved_area;
-  }
-  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
-  size_t size =
-      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
-                MemoryChunk::kPageSize);
-  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));
-
-  LOG(isolate_,
-      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
-               requested));
-
-  code_reservation_ = std::move(reservation);
-  code_page_allocator_instance_ = std::make_unique<base::BoundedPageAllocator>(
-      page_allocator, aligned_base, size,
-      static_cast<size_t>(MemoryChunk::kAlignment));
-  code_page_allocator_ = code_page_allocator_instance_.get();
-  DCHECK_NOT_NULL(code_page_allocator);
-}
-
 void MemoryAllocator::TearDown() {
@@ -147,13 +52,6 @@ void MemoryAllocator::TearDown() {
     last_chunk_.Free();
   }
 
-  if (code_page_allocator_instance_.get()) {
-    DCHECK(!code_range_.is_empty());
-    code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
-                                                            code_range_.size());
-    code_range_ = base::AddressRegion();
-    code_page_allocator_instance_.reset();
-  }
   code_page_allocator_ = nullptr;
   data_page_allocator_ = nullptr;
 }
@@ -17,6 +17,7 @@
 #include "src/base/macros.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/semaphore.h"
+#include "src/heap/code-range.h"
 #include "src/heap/heap.h"
 #include "src/heap/memory-chunk.h"
 #include "src/heap/spaces.h"
@@ -30,27 +31,6 @@ class Heap;
 class Isolate;
 class ReadOnlyPage;
 
-// The process-wide singleton that keeps track of code range regions with the
-// intention to reuse free code range regions as a workaround for CFG memory
-// leaks (see crbug.com/870054).
-class CodeRangeAddressHint {
- public:
-  // Returns the most recently freed code range start address for the given
-  // size. If there is no such entry, then a random address is returned.
-  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size);
-
-  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
-                                              size_t code_range_size);
-
- private:
-  base::Mutex mutex_;
-  // A map from code range size to an array of recently freed code range
-  // addresses. There should be O(1) different code range sizes.
-  // The length of each array is limited by the peak number of code ranges,
-  // which should be also O(1).
-  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
-};
-
 // ----------------------------------------------------------------------------
 // A space acquires chunks of memory from the operating system. The memory
 // allocator allocates and deallocates pages for the paged heap spaces and large
@@ -172,8 +152,9 @@ class MemoryAllocator {
   V8_EXPORT_PRIVATE static base::AddressRegion ComputeDiscardMemoryArea(
       Address addr, size_t size);
 
-  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate, size_t max_capacity,
-                                    size_t code_range_size);
+  V8_EXPORT_PRIVATE MemoryAllocator(Isolate* isolate,
+                                    v8::PageAllocator* code_page_allocator,
+                                    size_t max_capacity);
 
   V8_EXPORT_PRIVATE void TearDown();
 
@@ -283,17 +264,6 @@
                                    : data_page_allocator_;
   }
 
-  // A region of memory that may contain executable code including reserved
-  // OS page with read-write access in the beginning.
-  const base::AddressRegion& code_range() const {
-    // |code_range_| >= |optional RW pages| + |code_page_allocator_instance_|
-    DCHECK_IMPLIES(!code_range_.is_empty(), code_page_allocator_instance_);
-    DCHECK_IMPLIES(!code_range_.is_empty(),
-                   code_range_.contains(code_page_allocator_instance_->begin(),
-                                        code_page_allocator_instance_->size()));
-    return code_range_;
-  }
-
   Unmapper* unmapper() { return &unmapper_; }
 
   // Performs all necessary bookkeeping to free the memory, but does not free
@@ -306,9 +276,6 @@
   void RegisterReadOnlyMemory(ReadOnlyPage* page);
 
  private:
-  void InitializeCodePageAllocator(v8::PageAllocator* page_allocator,
-                                   size_t requested);
-
   // PreFreeMemory logically frees the object, i.e., it unregisters the
   // memory, logs a delete event and adds the chunk to remembered unmapped
   // pages.
@@ -360,10 +327,6 @@
 
   Isolate* isolate_;
 
-  // This object controls virtual space reserved for code on the V8 heap. This
-  // is only valid for 64-bit architectures where kRequiresCodeRange.
-  VirtualMemory code_reservation_;
-
   // Page allocator used for allocating data pages. Depending on the
   // configuration it may be a page allocator instance provided by
   // v8::Platform or a BoundedPageAllocator (when pointer compression is
@@ -371,29 +334,12 @@
   v8::PageAllocator* data_page_allocator_;
 
   // Page allocator used for allocating code pages. Depending on the
-  // configuration it may be a page allocator instance provided by
-  // v8::Platform or a BoundedPageAllocator (when pointer compression is
-  // enabled or on those 64-bit architectures where pc-relative 32-bit
+  // configuration it may be a page allocator instance provided by v8::Platform
+  // or a BoundedPageAllocator from Heap::code_range_ (when pointer compression
+  // is enabled or on those 64-bit architectures where pc-relative 32-bit
   // displacement can be used for call and jump instructions).
   v8::PageAllocator* code_page_allocator_;
 
-  // A part of the |code_reservation_| that may contain executable code
-  // including reserved page with read-write access in the beginning.
-  // See details below.
-  base::AddressRegion code_range_;
-
-  // This unique pointer owns the instance of bounded code allocator
-  // that controls executable pages allocation. It does not control the
-  // optionally existing page in the beginning of the |code_range_|.
-  // So, summarizing all above, the following conditions hold:
-  // 1) |code_reservation_| >= |code_range_|
-  // 2) |code_range_| >= |optional RW pages| +
-  //    |code_page_allocator_instance_|. 3) |code_reservation_| is
-  //    AllocatePageSize()-aligned 4) |code_page_allocator_instance_| is
-  //    MemoryChunk::kAlignment-aligned 5) |code_range_| is
-  //    CommitPageSize()-aligned
-  std::unique_ptr<base::BoundedPageAllocator> code_page_allocator_instance_;
-
   // Maximum space size in bytes.
   size_t capacity_;
 
@@ -3,9 +3,11 @@
 // found in the LICENSE file.
 
 #include "src/init/isolate-allocator.h"
 
+#include "src/base/bounded-page-allocator.h"
 #include "src/common/ptr-compr.h"
 #include "src/execution/isolate.h"
+#include "src/heap/code-range.h"
 #include "src/utils/memcopy.h"
 #include "src/utils/utils.h"
 
@@ -60,6 +62,10 @@ DEFINE_LAZY_LEAKY_OBJECT_GETTER(VirtualMemoryCage, GetProcessWidePtrComprCage)
 
 // static
 void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
+  if (std::shared_ptr<CodeRange> code_range =
+          CodeRange::GetProcessWideCodeRange()) {
+    code_range->Free();
+  }
   GetProcessWidePtrComprCage()->Free();
 }
 #endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
@@ -69,9 +75,10 @@ void IsolateAllocator::InitializeOncePerProcess() {
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
   PtrComprCageReservationParams params;
   if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
-    V8::FatalProcessOutOfMemory(nullptr,
-                                "Failed to reserve memory for process-wide V8 "
-                                "pointer compression cage");
+    V8::FatalProcessOutOfMemory(
+        nullptr,
+        "Failed to reserve virtual memory for process-wide V8 "
+        "pointer compression cage");
   }
 #endif
 }
@@ -234,7 +234,7 @@ bool TickSample::GetStackSample(Isolate* v8_isolate, RegisterState* regs,
   // TODO(petermarshall): Code range is always null on ia32 so this check for
   // IsNoFrameRegion will never actually run there.
   if (regs->pc &&
-      isolate->heap()->memory_allocator()->code_range().contains(
+      isolate->heap()->code_region().contains(
           reinterpret_cast<i::Address>(regs->pc)) &&
       IsNoFrameRegion(reinterpret_cast<i::Address>(regs->pc))) {
     // The frame is not setup, so it'd be hard to iterate the stack. Bailout.
@@ -218,7 +218,7 @@ class VirtualMemory final {
 
   v8::PageAllocator* page_allocator() { return page_allocator_; }
 
-  base::AddressRegion region() const { return region_; }
+  const base::AddressRegion& region() const { return region_; }
 
   // Returns the start address of the reserved memory.
   // If the memory was reserved with an alignment, this address is not
@@ -7111,8 +7111,7 @@ UNINITIALIZED_TEST(HeapLimit) {
 TEST(NoCodeRangeInJitlessMode) {
   if (!FLAG_jitless) return;
   CcTest::InitializeVM();
-  CHECK(
-      CcTest::i_isolate()->heap()->memory_allocator()->code_range().is_empty());
+  CHECK(CcTest::i_isolate()->heap()->code_region().is_empty());
 }
 
 TEST(Regress978156) {
@@ -53,15 +53,16 @@
 class V8_NODISCARD TestMemoryAllocatorScope {
  public:
   TestMemoryAllocatorScope(Isolate* isolate, size_t max_capacity,
-                           size_t code_range_size,
                            PageAllocator* page_allocator = nullptr)
       : isolate_(isolate),
         old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
     // Save the code pages for restoring them later on because the constructor
     // of MemoryAllocator will change them.
     isolate->GetCodePages()->swap(code_pages_);
-    isolate->heap()->memory_allocator_.reset(
-        new MemoryAllocator(isolate, max_capacity, code_range_size));
+    isolate->heap()->memory_allocator_.reset(new MemoryAllocator(
+        isolate,
+        page_allocator != nullptr ? page_allocator : isolate->page_allocator(),
+        max_capacity));
     if (page_allocator != nullptr) {
       isolate->heap()->memory_allocator_->data_page_allocator_ = page_allocator;
     }
@@ -113,8 +114,7 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
                               v8::PageAllocator* code_page_allocator,
                               size_t reserve_area_size, size_t commit_area_size,
                               Executability executable, Space* space) {
-  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
-                                                0);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
   TestCodePageAllocatorScope test_code_page_allocator_scope(
       isolate, code_page_allocator);
@@ -190,8 +190,7 @@ TEST(MemoryAllocator) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
 
-  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
-                                                0);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
 
   int total_pages = 0;
@@ -272,8 +271,7 @@ TEST(NewSpace) {
   if (FLAG_single_generation) return;
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
-  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
-                                                0);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
 
   NewSpace new_space(heap, memory_allocator->data_page_allocator(),
@@ -296,8 +294,7 @@
 TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
-  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
-                                                0);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved());
 
   OldSpace* s = new OldSpace(heap);
   CHECK_NOT_NULL(s);
@@ -797,8 +794,7 @@ TEST(NoMemoryForNewPage) {
 
   // Memory allocator that will fail to allocate any pages.
   FailingPageAllocator failing_allocator;
-  TestMemoryAllocatorScope test_allocator_scope(isolate, 0, 0,
-                                                &failing_allocator);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, 0, &failing_allocator);
   MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
   OldSpace faked_space(heap);
   Page* page = memory_allocator->AllocatePage(
@@ -97,17 +97,16 @@ TEST(CodeRangeCorrectContents) {
 
   std::vector<MemoryRange>* pages = i_isolate->GetCodePages();
 
-  const base::AddressRegion& code_range =
-      i_isolate->heap()->memory_allocator()->code_range();
-  CHECK(!code_range.is_empty());
+  const base::AddressRegion& code_region = i_isolate->heap()->code_region();
+  CHECK(!code_region.is_empty());
   // We should only have the code range and the embedded code range.
   CHECK_EQ(2, pages->size());
-  CHECK(PagesHasExactPage(pages, code_range.begin(), code_range.size()));
+  CHECK(PagesHasExactPage(pages, code_region.begin(), code_region.size()));
   CHECK(PagesHasExactPage(
       pages, reinterpret_cast<Address>(i_isolate->CurrentEmbeddedBlobCode()),
       i_isolate->CurrentEmbeddedBlobCodeSize()));
   if (i_isolate->is_short_builtin_calls_enabled()) {
-    // In this case embedded blob code must be included via code_range.
+    // In this case embedded blob code must be included via code_region.
     CHECK(PagesContainsRange(
         pages, reinterpret_cast<Address>(i_isolate->embedded_blob_code()),
         i_isolate->embedded_blob_code_size()));
@@ -129,9 +128,8 @@ TEST(CodePagesCorrectContents) {
   // There might be other pages already.
   CHECK_GE(pages->size(), 1);
 
-  const base::AddressRegion& code_range =
-      i_isolate->heap()->memory_allocator()->code_range();
-  CHECK(code_range.is_empty());
+  const base::AddressRegion& code_region = i_isolate->heap()->code_region();
+  CHECK(code_region.is_empty());
 
   // We should have the embedded code range even when there is no regular code
   // range.
@@ -939,13 +939,6 @@ TEST(ExtensionsRegistration) {
   const int kNThreads = 10;
 #elif V8_TARGET_ARCH_S390 && V8_TARGET_ARCH_32_BIT
   const int kNThreads = 10;
-#elif V8_COMPRESS_POINTERS_IN_SHARED_CAGE
-  // TODO(syg): Support larger cages or tweak kMaximalCodeRangeSize.
-  //
-  // Isolates reserve kMaximalCodeRangeSize of virtual memory. A shared pointer
-  // compression cage is 4GB and kMaximalCodeRangeSize is 128MB on arm64 and
-  // x64, giving us a maximum of ~33.
-  const int kNThreads = 30;
 #else
   const int kNThreads = 40;
 #endif
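The limit deleted here came from simple arithmetic: before this CL each Isolate carved its own maximal code range out of the shared 4 GB cage. For reference:

```cpp
#include <cstdio>

int main() {
  // Values quoted in the removed comment: 4 GB shared cage, 128 MB
  // kMaximalCodeRangeSize per Isolate on x64/arm64.
  const unsigned cage_mb = 4096;
  const unsigned code_range_mb = 128;
  std::printf("max isolates before this CL: ~%u\n", cage_mb / code_range_mb);
  // Prints 32; the test used 30 to leave headroom. With a single shared
  // CodeRange the cap no longer applies, so the special case goes away.
}
```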
@@ -38,6 +38,23 @@ UNINITIALIZED_TEST(PtrComprCageAndIsolateRoot) {
   isolate2->Dispose();
 }
 
+UNINITIALIZED_TEST(PtrComprCageCodeRange) {
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  Isolate* i_isolate = reinterpret_cast<Isolate*>(isolate);
+
+  VirtualMemoryCage* cage = i_isolate->GetPtrComprCage();
+  if (i_isolate->RequiresCodeRange()) {
+    CHECK(!i_isolate->heap()->code_region().is_empty());
+    CHECK(cage->reservation()->InVM(i_isolate->heap()->code_region().begin(),
+                                    i_isolate->heap()->code_region().size()));
+  }
+
+  isolate->Dispose();
+}
+
 #ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
 UNINITIALIZED_TEST(SharedPtrComprCage) {
   v8::Isolate::CreateParams create_params;
@@ -65,6 +82,24 @@ UNINITIALIZED_TEST(SharedPtrComprCage) {
   isolate1->Dispose();
   isolate2->Dispose();
 }
+
+UNINITIALIZED_TEST(SharedPtrComprCageCodeRange) {
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+
+  v8::Isolate* isolate1 = v8::Isolate::New(create_params);
+  Isolate* i_isolate1 = reinterpret_cast<Isolate*>(isolate1);
+  v8::Isolate* isolate2 = v8::Isolate::New(create_params);
+  Isolate* i_isolate2 = reinterpret_cast<Isolate*>(isolate2);
+
+  if (i_isolate1->RequiresCodeRange() || i_isolate2->RequiresCodeRange()) {
+    CHECK_EQ(i_isolate1->heap()->code_region(),
+             i_isolate2->heap()->code_region());
+  }
+
+  isolate1->Dispose();
+  isolate2->Dispose();
+}
 #endif  // V8_COMPRESS_POINTERS_IN_SHARED_CAGE
 
 }  // namespace internal
@@ -1485,13 +1485,6 @@
   'concurrent-initial-prototype-change-1': [SKIP],
 }],  # variant == concurrent_inlining
 
-##############################################################################
-['pointer_compression_shared_cage', {
-  # kMaximalCodeRangeSize causing VM exhaustion with 50 workers when sharing a
-  # pointer cage.
-  'regress/wasm/regress-1010272': [SKIP],
-}],
-
 ################################################################################
 ['single_generation', {
   # These tests rely on allocation site tracking which only works in the young generation.