Reland "[rwx][mac] Support fast W^X permission switching on Apple Silicon (M1)"

This is a reland of commit 9d31f8663a
There were issues with --future flag implications on M1.

Original change's description:
> [rwx][mac] Support fast W^X permission switching on Apple Silicon (M1)
>
> ... for V8 code space. The feature is currently disabled.
>
> In order to use fast W^X permission switching we must allocate
> executable pages with readable writable executable permissions (RWX).
> However, MacOS on ARM64 ("Apple M1"/Apple Silicon) prohibits further
> permission changing of RWX memory pages. This means that the code page
> headers must be allocated with RWX permissions too because otherwise
> it wouldn't be possible to allocate a large code page over the freed
> regular code page and vice versa.
>
> When enabled, the new machinery works as follows:
>
> 1) when memory region is reserved for allocating executable pages, the
>    whole region is committed with RWX permissions and then decommitted,
> 2) since reconfiguration of RWX page permissions is not allowed on
>    MacOS on ARM64 ("Apple M1"/Apple Silicon), there must be no attempts
>    to change them,
> 3) the request to set RWX permissions in the executable page region
>    just recommits the pages without changing permissions (see (1), they
>    were already allocated as RWX and then discarded),
> 4) in order to make executable pages inaccessible one must use
>    OS::DiscardSystemPages() instead of OS::DecommitPages() or
>    setting permissions to kNoAccess because the latter two are not
>    allowed by the MacOS (see (2)).
> 5) since code space page headers are allocated as RWX pages it's also
>    necessary to switch between W^X modes when updating the data in the
>    page headers (i.e. when marking, updating stats, wiring pages in
>    lists, etc.). The new CodePageHeaderModificationScope class is used
>    in the respective places. On unrelated configurations it's a no-op.
>
> The fast permission switching can't be used for V8 configuration with
> enabled pointer compression and disabled external code space because
> a) the pointer compression cage has to be reserved with MAP_JIT flag
>    which is too expensive,
> b) in case of shared pointer compression cage if the code range will
>    be deleted while the cage is still alive then attempt to configure
>    permissions of pages that were previously set to RWX will fail.
>
> This CL also extends the unmapper unit tests with permissions tracking
> for discarded pages.
>
> Bug: v8:12797
> Change-Id: Idb28cbc481306477589eee9962d2e75167d87c61
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3579303
> Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80238}

Bug: v8:12797
Change-Id: I0fe86666f31bad37d7074e217555c95900d2afba
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3610433
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80259}
This commit is contained in:
Igor Sheludko 2022-04-28 12:14:12 +02:00 committed by V8 LUCI CQ
parent 03b69480db
commit 449ece383b
33 changed files with 432 additions and 121 deletions
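For illustration, the fast W^X switching that the description above relies on boils down to macOS's per-thread write-protect toggle for MAP_JIT memory. The following minimal, self-contained C++ sketch shows that primitive on its own; it assumes macOS 11+ on Apple Silicon, and JitWriteScope plus the rest of the scaffolding are illustrative names rather than V8 code.

#include <libkern/OSCacheControl.h>  // sys_icache_invalidate()
#include <pthread.h>                 // pthread_jit_write_protect_np()
#include <sys/mman.h>

#include <cstdint>
#include <cstdio>
#include <cstring>

// RAII helper analogous to RwxMemoryWriteScope: while it is alive the calling
// thread sees MAP_JIT memory as writable; on destruction it is executable again.
class JitWriteScope {
 public:
  JitWriteScope() { pthread_jit_write_protect_np(0); }   // writable view
  ~JitWriteScope() { pthread_jit_write_protect_np(1); }  // executable view
};

int main() {
  // Map the region RWX with MAP_JIT once; its protection is never changed.
  const size_t kSize = 4096;
  void* mem = mmap(nullptr, kSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) return 1;

  const uint32_t kRet = 0xD65F03C0;  // AArch64 "ret" instruction
  {
    JitWriteScope writable;  // flip this thread to the writable view
    std::memcpy(mem, &kRet, sizeof(kRet));
  }  // back to the executable view
  sys_icache_invalidate(mem, sizeof(kRet));

  reinterpret_cast<void (*)()>(mem)();  // executes the freshly written "ret"
  std::printf("executed JIT-written stub\n");
  munmap(mem, kSize);
  return 0;
}

The toggle is per thread, which is why the scopes introduced by this CL can be used from concurrent marker threads without affecting the rest of the process.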

View File

@@ -11,7 +11,7 @@
 namespace v8 {
 namespace internal {
 
-RwxMemoryWriteScope::RwxMemoryWriteScope() { SetWritable(); }
+RwxMemoryWriteScope::RwxMemoryWriteScope(const char* comment) { SetWritable(); }
 
 RwxMemoryWriteScope::~RwxMemoryWriteScope() { SetExecutable(); }

View File

@@ -32,7 +32,9 @@ class CodeSpaceWriteScope;
 // The scope is reentrant and thread safe.
 class V8_NODISCARD RwxMemoryWriteScope final {
  public:
-  V8_INLINE RwxMemoryWriteScope();
+  // The comment argument is used only for ensuring that explanation about why
+  // the scope is needed is given at particular use case.
+  V8_INLINE explicit RwxMemoryWriteScope(const char* comment);
   V8_INLINE ~RwxMemoryWriteScope();
 
   // Disable copy constructor and copy-assignment operator, since this manages
@@ -59,6 +61,16 @@ class V8_NODISCARD RwxMemoryWriteScope final {
 #endif  // V8_HAS_PTHREAD_JIT_WRITE_PROTECT
 };
 
+// This class is a no-op version of the RwxMemoryWriteScope class above.
+// It's used as a target type for other scope type definitions when a no-op
+// semantics is required.
+class V8_NODISCARD NopRwxMemoryWriteScope final {
+ public:
+  V8_INLINE explicit NopRwxMemoryWriteScope(const char* comment) {
+    // Define a constructor to avoid unused variable warnings.
+  }
+};
+
 // Same as the RwxMemoryWriteScope but without inlining the code.
 // This is a workaround for component build issue (crbug/1316800), when
 // a thread_local value can't be properly exported.

View File

@@ -153,6 +153,47 @@ class Code;
 using CodeT = Code;
 #endif
 
+// V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT controls how V8 sets permissions for
+// executable pages.
+// In particular,
+// 1) when memory region is reserved for code range, the whole region is
+//    committed with RWX permissions and then the whole region is discarded,
+// 2) since reconfiguration of RWX page permissions is not allowed on MacOS on
+//    ARM64 ("Apple M1"/Apple Silicon), there must be no attempts to change
+//    them,
+// 3) the request to set RWX permissions in the executable page region just
+//    commits the pages without changing permissions (see (1), they were
+//    already allocated as RWX and then decommitted),
+// 4) in order to make executable pages inaccessible one must use
+//    OS::DiscardSystemPages() instead of using OS::DecommitPages() or setting
+//    permissions to kNoAccess because the latter two are not allowed by the
+//    MacOS (see (2)),
+// 5) since code space page headers are allocated as RWX pages it's also
+//    necessary to switch between W^X modes when updating the data in the
+//    page headers (i.e. when marking, updating stats, wiring pages in
+//    lists, etc.). The new CodePageHeaderModificationScope class is used
+//    in the respective places. On unrelated configurations it's a no-op.
+//
+// This is applicable only to MacOS on ARM64 ("Apple M1"/Apple Silicon) which
+// has the APRR/MAP_JIT machinery for fast W^X permission switching (see
+// pthread_jit_write_protect).
+//
+// This approach doesn't work and shouldn't be used for V8 configuration with
+// enabled pointer compression and disabled external code space because
+// a) the pointer compression cage has to be reserved with MAP_JIT flag which
+//    is too expensive,
+// b) in case of shared pointer compression cage if the code range will be
+//    deleted while the cage is still alive then attempt to configure
+//    permissions of pages that were previously set to RWX will fail.
+//
+#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT && \
+    !(defined(V8_COMPRESS_POINTERS) && !defined(V8_EXTERNAL_CODE_SPACE))
+// TODO(v8:12797): enable fast W^X permissions switching on Apple Silicon.
+#define V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT false
+#else
+#define V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT false
+#endif
+
 // Determine whether tagged pointers are 8 bytes (used in Torque layouts for
 // choosing where to insert padding).
 #if V8_TARGET_ARCH_64_BIT && !defined(V8_COMPRESS_POINTERS)
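For illustration, points (1)-(4) of the comment above map onto plain mmap/madvise calls: the region is committed as RWX exactly once, and afterwards only its backing pages are dropped or reused, never its protection. A hedged sketch, assuming macOS on arm64; kRegionSize is an illustrative constant, and V8's actual OS::DiscardSystemPages()/RecommitPages() wrappers may use different madvise flags.

#include <pthread.h>
#include <sys/mman.h>

#include <cstddef>
#include <cstdio>

int main() {
  const size_t kRegionSize = size_t{1} << 20;

  // (1) Reserve and commit the whole code region as RWX exactly once.
  void* region = mmap(nullptr, kRegionSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
  if (region == MAP_FAILED) return 1;

  // ...then immediately give the physical pages back; the RWX protection of
  // the mapping itself stays untouched.
  madvise(region, kRegionSize, MADV_FREE);

  // (2)/(3) A later request to "make this range RWX" never calls mprotect();
  // the pages are simply recommitted, e.g. by writing to them again.
  pthread_jit_write_protect_np(0);  // writable view for this thread
  static_cast<char*>(region)[0] = 0;
  pthread_jit_write_protect_np(1);  // back to the executable view

  // (4) Making the range "inaccessible" again means discarding its contents,
  // not mprotect(PROT_NONE), which would be a forbidden permission change on
  // this configuration.
  madvise(region, kRegionSize, MADV_FREE);

  munmap(region, kRegionSize);
  std::printf("done\n");
  return 0;
}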

View File

@@ -1260,6 +1260,9 @@ PipelineCompilationJob::Status PipelineCompilationJob::FinalizeJobImpl(
   compilation_info()->SetCode(code);
   Handle<NativeContext> context(compilation_info()->native_context(), isolate);
   if (CodeKindCanDeoptimize(code->kind())) {
+    CodeTPageHeaderModificationScope rwx_write_scope(
+        "Storing a CodeT object triggers marking barrier which requires "
+        "write access to the CodeT page header");
     context->AddOptimizedCode(ToCodeT(*code));
   }
   RegisterWeakObjectsInOptimizedCode(isolate, context, code);

View File

@@ -343,6 +343,9 @@ void Deoptimizer::DeoptimizeMarkedCodeForContext(NativeContext native_context) {
     if (code.marked_for_deoptimization()) {
       codes.insert(code);
 
+      CodeTPageHeaderModificationScope rwx_write_scope(
+          "Storing a CodeT object triggers marking barrier which requires "
+          "write access to the CodeT page header");
       if (!prev.is_null()) {
         // Skip this code in the optimized code list.
         prev.set_next_code_link(next);

View File

@@ -1537,6 +1537,8 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory {
   bool force_slow_path() const { return force_slow_path_; }
   bool* force_slow_path_address() { return &force_slow_path_; }
 
+  bool jitless() const { return jitless_; }
+
   DebugInfo::ExecutionMode* debug_execution_mode_address() {
     return &debug_execution_mode_;
   }

View File

@@ -210,13 +210,6 @@ struct MaybeBoolFlag {
 #define ENABLE_SPARKPLUG_BY_DEFAULT false
 #endif
 
-#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
-// Must be enabled on M1.
-#define MUST_WRITE_PROTECT_CODE_MEMORY true
-#else
-#define MUST_WRITE_PROTECT_CODE_MEMORY false
-#endif
-
 // Supported ARM configurations are:
 //  "armv6": ARMv6 + VFPv2
 //  "armv7": ARMv7 + VFPv3-D32 + NEON
@@ -532,9 +525,7 @@ DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
 #if V8_SHORT_BUILTIN_CALLS
 DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
 #endif
-#if !MUST_WRITE_PROTECT_CODE_MEMORY
 DEFINE_WEAK_VALUE_IMPLICATION(future, write_protect_code_memory, false)
-#endif
 DEFINE_WEAK_IMPLICATION(future, compact_maps)
 
 DEFINE_BOOL_READONLY(dict_property_const_tracking,
@@ -753,14 +744,9 @@ DEFINE_BOOL(
     always_use_string_forwarding_table, false,
     "use string forwarding table instead of thin strings for all strings")
 
-#if !defined(V8_OS_DARWIN) || !defined(V8_HOST_ARCH_ARM64)
 DEFINE_BOOL(write_code_using_rwx, true,
             "flip permissions to rwx to write page instead of rw")
 DEFINE_NEG_IMPLICATION(jitless, write_code_using_rwx)
-#else
-DEFINE_BOOL_READONLY(write_code_using_rwx, false,
-                     "flip permissions to rwx to write page instead of rw")
-#endif
 
 // Flags for concurrent recompilation.
 DEFINE_BOOL(concurrent_recompilation, true,
@@ -1271,12 +1257,7 @@ DEFINE_INT(scavenge_task_trigger, 80,
 DEFINE_BOOL(scavenge_separate_stack_scanning, false,
             "use a separate phase for stack scanning in scavenge")
 DEFINE_BOOL(trace_parallel_scavenge, false, "trace parallel scavenge")
-#if MUST_WRITE_PROTECT_CODE_MEMORY
-DEFINE_BOOL_READONLY(write_protect_code_memory, true,
-                     "write protect code memory")
-#else
 DEFINE_BOOL(write_protect_code_memory, true, "write protect code memory")
-#endif
 #if defined(V8_ATOMIC_OBJECT_FIELD_WRITES)
 #define V8_CONCURRENT_MARKING_BOOL true
 #else
@@ -2082,9 +2063,7 @@ DEFINE_PERF_PROF_BOOL(
     "Remove the perf file right after creating it (for testing only).")
 DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
 // TODO(v8:8462) Remove implication once perf supports remapping.
-#if !MUST_WRITE_PROTECT_CODE_MEMORY
 DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
-#endif
 #if V8_ENABLE_WEBASSEMBLY
 DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
 #endif  // V8_ENABLE_WEBASSEMBLY

View File

@@ -131,6 +131,7 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
   params.page_size = MemoryChunk::kPageSize;
   params.requested_start_hint =
       GetCodeRangeAddressHint()->GetAddressHint(requested, allocate_page_size);
+  params.jit = JitPermission::kMapAsJittable;
 
   if (!VirtualMemoryCage::InitReservation(params)) return false;
@@ -155,7 +156,14 @@ bool CodeRange::InitReservation(v8::PageAllocator* page_allocator,
       return false;
     }
   }
+
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT &&
+      params.jit == JitPermission::kMapAsJittable) {
+    void* base = reinterpret_cast<void*>(page_allocator_->begin());
+    size_t size = page_allocator_->size();
+    CHECK(params.page_allocator->SetPermissions(
+        base, size, PageAllocator::kReadWriteExecute));
+    CHECK(params.page_allocator->DiscardSystemPages(base, size));
+  }
   return true;
 }
@@ -234,19 +242,31 @@ uint8_t* CodeRange::RemapEmbeddedBuiltins(Isolate* isolate,
     }
   }
 
-  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
-                                        PageAllocator::kReadWrite)) {
-    V8::FatalProcessOutOfMemory(isolate,
-                                "Re-embedded builtins: set permissions");
-  }
-  memcpy(embedded_blob_code_copy, embedded_blob_code, embedded_blob_code_size);
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    if (!page_allocator()->RecommitPages(embedded_blob_code_copy, code_size,
+                                         PageAllocator::kReadWriteExecute)) {
+      V8::FatalProcessOutOfMemory(isolate,
+                                  "Re-embedded builtins: recommit pages");
+    }
+    RwxMemoryWriteScope rwx_write_scope(
+        "Enable write access to copy the blob code into the code range");
+    memcpy(embedded_blob_code_copy, embedded_blob_code,
+           embedded_blob_code_size);
+  } else {
+    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+                                          PageAllocator::kReadWrite)) {
+      V8::FatalProcessOutOfMemory(isolate,
+                                  "Re-embedded builtins: set permissions");
+    }
+    memcpy(embedded_blob_code_copy, embedded_blob_code,
+           embedded_blob_code_size);
 
-  if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
-                                        PageAllocator::kReadExecute)) {
-    V8::FatalProcessOutOfMemory(isolate,
-                                "Re-embedded builtins: set permissions");
+    if (!page_allocator()->SetPermissions(embedded_blob_code_copy, code_size,
+                                          PageAllocator::kReadExecute)) {
+      V8::FatalProcessOutOfMemory(isolate,
+                                  "Re-embedded builtins: set permissions");
+    }
   }
   embedded_blob_code_copy_.store(embedded_blob_code_copy,
                                  std::memory_order_release);
   return embedded_blob_code_copy;

View File

@@ -494,6 +494,8 @@ void ConcurrentMarking::Run(JobDelegate* delegate,
   }
   bool is_per_context_mode = local_marking_worklists.IsPerContextMode();
   bool done = false;
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Marking a Code object requires write access to the Code page header");
   while (!done) {
     size_t current_marked_bytes = 0;
     int objects_processed = 0;

View File

@@ -16,6 +16,7 @@
 #include "src/base/platform/platform.h"
 #include "src/base/sanitizer/msan.h"
 #include "src/common/assert-scope.h"
+#include "src/common/code-memory-access-inl.h"
 #include "src/execution/isolate-data.h"
 #include "src/execution/isolate.h"
 #include "src/heap/code-object-registry.h"
@@ -600,6 +601,9 @@ CodeSpaceMemoryModificationScope::CodeSpaceMemoryModificationScope(Heap* heap)
     : heap_(heap) {
   DCHECK_EQ(ThreadId::Current(), heap_->isolate()->thread_id());
   heap_->safepoint()->AssertActive();
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetWritable();
+  }
   if (heap_->write_protect_code_memory()) {
     heap_->increment_code_space_memory_modification_scope_depth();
     heap_->code_space()->SetCodeModificationPermissions();
@@ -625,11 +629,17 @@ CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
       page = page->next_page();
     }
   }
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetExecutable();
+  }
 }
 
 CodePageCollectionMemoryModificationScope::
     CodePageCollectionMemoryModificationScope(Heap* heap)
     : heap_(heap) {
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetWritable();
+  }
   if (heap_->write_protect_code_memory()) {
     heap_->IncrementCodePageCollectionMemoryModificationScopeDepth();
   }
@@ -643,6 +653,9 @@ CodePageCollectionMemoryModificationScope::
       heap_->ProtectUnprotectedMemoryChunks();
     }
   }
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetExecutable();
+  }
 }
 
 #ifdef V8_ENABLE_THIRD_PARTY_HEAP
@@ -658,6 +671,9 @@ CodePageMemoryModificationScope::CodePageMemoryModificationScope(
     : chunk_(chunk),
       scope_active_(chunk_->heap()->write_protect_code_memory() &&
                     chunk_->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetWritable();
+  }
   if (scope_active_) {
     DCHECK(chunk_->owner()->identity() == CODE_SPACE ||
            (chunk_->owner()->identity() == CODE_LO_SPACE));
@@ -669,6 +685,9 @@ CodePageMemoryModificationScope::~CodePageMemoryModificationScope() {
   if (scope_active_) {
     MemoryChunk::cast(chunk_)->SetDefaultCodePermissions();
   }
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    RwxMemoryWriteScope::SetExecutable();
+  }
 }
 
 IgnoreLocalGCRequests::IgnoreLocalGCRequests(Heap* heap) : heap_(heap) {

View File

@@ -8,9 +8,9 @@
 // Clients of this interface shouldn't depend on lots of heap internals.
 // Do not include anything from src/heap here!
-#include "src/heap/heap-write-barrier.h"
+#include "src/common/code-memory-access-inl.h"
 #include "src/common/globals.h"
+#include "src/heap/heap-write-barrier.h"
 #include "src/objects/code.h"
 #include "src/objects/compressed-slots-inl.h"
 #include "src/objects/fixed-array.h"
@@ -227,6 +227,14 @@ void WriteBarrier::Marking(HeapObject host, ObjectSlot slot, Object value) {
   Marking(host, HeapObjectSlot(slot), HeapObject::cast(value));
 }
 
+void WriteBarrier::Marking(HeapObject host, ObjectSlot slot, Code value) {
+  DCHECK(!HasWeakHeapObjectTag(value));
+  if (!value.IsHeapObject()) return;
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Marking a Code object requires write access to the Code page header");
+  Marking(host, HeapObjectSlot(slot), HeapObject::cast(value));
+}
+
 void WriteBarrier::Marking(HeapObject host, MaybeObjectSlot slot,
                            MaybeObject value) {
   HeapObject value_heap_object;

View File

@@ -48,6 +48,7 @@ inline bool IsReadOnlyHeapObject(HeapObject object);
 class V8_EXPORT_PRIVATE WriteBarrier {
  public:
   static inline void Marking(HeapObject host, ObjectSlot, Object value);
+  static inline void Marking(HeapObject host, ObjectSlot, Code value);
   static inline void Marking(HeapObject host, HeapObjectSlot, HeapObject value);
   static inline void Marking(HeapObject host, MaybeObjectSlot,
                              MaybeObject value);

View File

@@ -2837,6 +2837,7 @@ void Heap::UnprotectAndRegisterMemoryChunk(MemoryChunk* chunk,
 
 void Heap::UnprotectAndRegisterMemoryChunk(HeapObject object,
                                            UnprotectMemoryOrigin origin) {
+  if (!write_protect_code_memory()) return;
   UnprotectAndRegisterMemoryChunk(MemoryChunk::FromHeapObject(object), origin);
 }
@@ -6203,9 +6204,14 @@ void Heap::TearDown() {
   shared_map_space_ = nullptr;
   shared_map_allocator_.reset();
 
-  for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
-    delete space_[i];
-    space_[i] = nullptr;
+  {
+    CodePageHeaderModificationScope rwx_write_scope(
+        "Deletion of CODE_SPACE and CODE_LO_SPACE requires write access to "
+        "Code page headers");
+    for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
+      delete space_[i];
+      space_[i] = nullptr;
+    }
   }
 
   isolate()->read_only_heap()->OnHeapTearDown(this);

View File

@@ -96,12 +96,14 @@ class MemoryChunk;
 class MemoryMeasurement;
 class MemoryReducer;
 class MinorMarkCompactCollector;
+class NopRwxMemoryWriteScope;
 class ObjectIterator;
 class ObjectStats;
 class Page;
 class PagedSpace;
 class ReadOnlyHeap;
 class RootVisitor;
+class RwxMemoryWriteScope;
 class SafepointScope;
 class ScavengeJob;
 class Scavenger;
@@ -611,7 +613,19 @@ class Heap {
   // Dump heap statistics in JSON format.
   void DumpJSONHeapStatistics(std::stringstream& stream);
 
-  bool write_protect_code_memory() const { return write_protect_code_memory_; }
+  bool write_protect_code_memory() const {
+    if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) {
+      // On MacOS on ARM64 ("Apple M1"/Apple Silicon) code modification
+      // protection must be used. It can be achieved by one of the following
+      // approaches:
+      // 1) switching memory protection between RW-RX as on other
+      //    architectures => return true,
+      // 2) fast W^X machinery (see V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT)
+      //    which doesn't require memory protection changes => return false.
+      return !V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT;
+    }
+    return write_protect_code_memory_;
+  }
 
   uintptr_t code_space_memory_modification_scope_depth() {
     return code_space_memory_modification_scope_depth_;
@@ -2595,6 +2609,43 @@ class V8_NODISCARD CodePageCollectionMemoryModificationScope {
   Heap* heap_;
 };
 
+// The CodePageHeaderModificationScope enables write access to Code space page
+// headers.
+// On most of the configurations it's a no-op because Code space page headers
+// are configured as writable and permissions are never changed.
+// However, on MacOS on ARM64 ("Apple M1"/Apple Silicon) the situation is
+// different. In order to be able to use fast W^X permissions switching
+// machinery (APRR/MAP_JIT) it's necessary to configure executable memory as
+// readable writable executable (RWX). Also, on MacOS on ARM64 reconfiguration
+// of RWX page permissions to anything else is prohibited.
+// So, in order to be able to allocate large code pages over freed regular
+// code pages and vice versa we have to allocate Code page headers as RWX too
+// and switch them to writable mode when it's necessary to modify the code
+// page header.
+// The scope can be used from any thread and affects only current thread, see
+// RwxMemoryWriteScope for details about semantics of the scope.
+#if V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT
+using CodePageHeaderModificationScope = RwxMemoryWriteScope;
+#else
+// When write protection of code page headers is not required the scope is
+// a no-op.
+using CodePageHeaderModificationScope = NopRwxMemoryWriteScope;
+#endif  // V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT
+
+// The CodeTPageHeaderModificationScope enables write access to CodeT objects
+// page headers.
+#if V8_EXTERNAL_CODE_SPACE
+// When V8_EXTERNAL_CODE_SPACE is enabled this scope is a no-op because CodeT
+// objects are data objects and thus the page header is always in writable
+// state.
+using CodeTPageHeaderModificationScope = NopRwxMemoryWriteScope;
+#else
+// When V8_EXTERNAL_CODE_SPACE is disabled this scope is an alias to
+// CodePageHeaderModificationScope because in this case CodeT is a Code object
+// and thus write access to the page headers might be required.
+using CodeTPageHeaderModificationScope = CodePageHeaderModificationScope;
+#endif  // V8_EXTERNAL_CODE_SPACE
+
 // The CodePageMemoryModificationScope does not check if transitions to
 // writeable and back to executable are actually allowed, i.e. the MemoryChunk
 // was registered to be executable. It can be used by concurrent threads.

View File

@@ -225,6 +225,9 @@ namespace {
 
 void MarkRoots(Heap* heap) {
   IncrementalMarkingRootMarkingVisitor visitor(heap);
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Marking of builtins table entries require write access to Code page "
+      "header");
   heap->IterateRoots(
       &visitor,
       base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
@@ -304,7 +307,11 @@ void IncrementalMarking::StartBlackAllocation() {
   black_allocation_ = true;
   heap()->old_space()->MarkLinearAllocationAreaBlack();
   if (heap()->map_space()) heap()->map_space()->MarkLinearAllocationAreaBlack();
-  heap()->code_space()->MarkLinearAllocationAreaBlack();
+  {
+    CodePageHeaderModificationScope rwx_write_scope(
+        "Marking Code objects requires write access to the Code page header");
+    heap()->code_space()->MarkLinearAllocationAreaBlack();
+  }
   heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
     local_heap->MarkLinearAllocationAreaBlack();
   });
@@ -318,7 +325,11 @@ void IncrementalMarking::PauseBlackAllocation() {
   DCHECK(IsMarking());
   heap()->old_space()->UnmarkLinearAllocationArea();
   if (heap()->map_space()) heap()->map_space()->UnmarkLinearAllocationArea();
-  heap()->code_space()->UnmarkLinearAllocationArea();
+  {
+    CodePageHeaderModificationScope rwx_write_scope(
+        "Marking Code objects requires write access to the Code page header");
+    heap()->code_space()->UnmarkLinearAllocationArea();
+  }
   heap()->safepoint()->IterateLocalHeaps(
       [](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
   if (FLAG_trace_incremental_marking) {

View File

@@ -84,6 +84,9 @@ LocalHeap::~LocalHeap() {
   FreeLinearAllocationArea();
 
   if (!is_main_thread()) {
+    CodePageHeaderModificationScope rwx_write_scope(
+        "Publishing of marking barrier results for Code space pages requires "
+        "write access to Code page headers");
     marking_barrier_->Publish();
     WriteBarrier::ClearForThread(marking_barrier_.get());
   }

View File

@@ -704,7 +704,12 @@ void MarkCompactCollector::EnsureSweepingCompleted(
   sweeper()->EnsureCompleted();
 
   heap()->old_space()->RefillFreeList();
-  heap()->code_space()->RefillFreeList();
+  {
+    CodePageHeaderModificationScope rwx_write_scope(
+        "Updating per-page stats stored in page headers requires write "
+        "access to Code page headers");
+    heap()->code_space()->RefillFreeList();
+  }
   if (heap()->map_space()) {
     heap()->map_space()->RefillFreeList();
     heap()->map_space()->SortFreeList();
@@ -825,6 +830,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   std::vector<LiveBytesPagePair> pages;
   pages.reserve(number_of_pages);
 
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Modification of Code page header flags requires write access");
+
   DCHECK(!sweeping_in_progress());
   Page* owner_of_linear_allocation_area =
       space->top() == space->limit()
@@ -2223,6 +2231,8 @@ std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
   bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
   Isolate* isolate = heap()->isolate();
   PtrComprCageBase cage_base(isolate);
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Marking of Code objects require write access to Code page headers");
   while (local_marking_worklists()->Pop(&object) ||
          local_marking_worklists()->PopOnHold(&object)) {
     // Left trimming may result in grey or black filler objects on the marking
@@ -3831,6 +3841,9 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
                    marking_state->live_bytes(chunk));
       break;
     case kObjectsOldToOld: {
+      CodePageHeaderModificationScope rwx_write_scope(
+          "Clearing of markbits in Code spaces requires write access to "
+          "Code page headers");
       const bool success = LiveObjectVisitor::VisitBlackObjects(
           chunk, marking_state, &old_space_visitor_,
           LiveObjectVisitor::kClearMarkbits, &failed_object);

View File

@@ -234,7 +234,11 @@ void MarkingBarrier::Activate(bool is_compacting) {
   if (is_main_thread_barrier_) {
     ActivateSpace(heap_->old_space());
     if (heap_->map_space()) ActivateSpace(heap_->map_space());
-    ActivateSpace(heap_->code_space());
+    {
+      CodePageHeaderModificationScope rwx_write_scope(
+          "Modification of Code page header flags requires write access");
+      ActivateSpace(heap_->code_space());
+    }
     ActivateSpace(heap_->new_space());
 
     for (LargePage* p : *heap_->new_lo_space()) {
@@ -246,8 +250,12 @@ void MarkingBarrier::Activate(bool is_compacting) {
       p->SetOldGenerationPageFlags(true);
     }
 
-    for (LargePage* p : *heap_->code_lo_space()) {
-      p->SetOldGenerationPageFlags(true);
+    {
+      CodePageHeaderModificationScope rwx_write_scope(
+          "Modification of Code page header flags requires write access");
+      for (LargePage* p : *heap_->code_lo_space()) {
+        p->SetOldGenerationPageFlags(true);
+      }
     }
   }
 }

View File

@@ -489,6 +489,13 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
 }
 
 void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
+  base::Optional<CodePageHeaderModificationScope> rwx_write_scope;
+  if (chunk->executable() == EXECUTABLE) {
+    rwx_write_scope.emplace(
+        "We are going to modify the chunk's header, so ensure we have write "
+        "access to Code page headers");
+  }
+
   DCHECK(chunk->IsFlagSet(MemoryChunk::UNREGISTERED));
   DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
   DCHECK(!chunk->InReadOnlySpace());
@@ -673,28 +680,57 @@ bool MemoryAllocator::SetPermissionsOnExecutableMemoryChunk(VirtualMemory* vm,
   const Address code_area = start + code_area_offset;
   const Address post_guard_page = start + chunk_size - guard_size;
 
-  // Commit the non-executable header, from start to pre-code guard page.
-  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
-    // Create the pre-code guard page, following the header.
-    if (vm->SetPermissions(pre_guard_page, page_size,
-                           PageAllocator::kNoAccess)) {
-      // Commit the executable code body.
-      if (vm->SetPermissions(code_area, area_size,
-                             MemoryChunk::GetCodeModificationPermission())) {
-        // Create the post-code guard page.
-        if (vm->SetPermissions(post_guard_page, page_size,
-                               PageAllocator::kNoAccess)) {
-          UpdateAllocatedSpaceLimits(start, code_area + area_size);
-          return true;
-        }
+  bool jitless = unmapper_.heap_->isolate()->jitless();
 
-        vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT && !jitless) {
+    DCHECK(unmapper_.heap_->isolate()->RequiresCodeRange());
+    // Commit the header, from start to pre-code guard page.
+    // We have to commit it as executable because otherwise we'll not be able
+    // to change permissions to anything else.
+    if (vm->RecommitPages(start, pre_guard_offset,
+                          PageAllocator::kReadWriteExecute)) {
+      // Create the pre-code guard page, following the header.
+      if (vm->DiscardSystemPages(pre_guard_page, page_size)) {
+        // Commit the executable code body.
+        if (vm->RecommitPages(code_area, area_size,
+                              PageAllocator::kReadWriteExecute)) {
+          // Create the post-code guard page.
+          if (vm->DiscardSystemPages(post_guard_page, page_size)) {
+            UpdateAllocatedSpaceLimits(start, code_area + area_size);
+            return true;
+          }
+
+          vm->DiscardSystemPages(code_area, area_size);
+        }
       }
+      vm->DiscardSystemPages(start, pre_guard_offset);
     }
-    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
-  }
+  } else {
+    // Commit the non-executable header, from start to pre-code guard page.
+    if (vm->SetPermissions(start, pre_guard_offset,
+                           PageAllocator::kReadWrite)) {
+      // Create the pre-code guard page, following the header.
+      if (vm->SetPermissions(pre_guard_page, page_size,
+                             PageAllocator::kNoAccess)) {
+        // Commit the executable code body.
+        if (vm->SetPermissions(
+                code_area, area_size,
+                jitless ? PageAllocator::kReadWrite
+                        : MemoryChunk::GetCodeModificationPermission())) {
+          // Create the post-code guard page.
+          if (vm->SetPermissions(post_guard_page, page_size,
+                                 PageAllocator::kNoAccess)) {
+            UpdateAllocatedSpaceLimits(start, code_area + area_size);
+            return true;
+          }
+
+          vm->SetPermissions(code_area, area_size, PageAllocator::kNoAccess);
+        }
+      }
+      vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
+    }
+  }
 
   return false;
 }

View File

@@ -45,6 +45,7 @@ void MemoryChunk::InitializationMemoryFence() {
 
 void MemoryChunk::DecrementWriteUnprotectCounterAndMaybeSetPermissions(
     PageAllocator::Permission permission) {
+  DCHECK(!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT);
   DCHECK(permission == PageAllocator::kRead ||
          permission == PageAllocator::kReadExecute);
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
@@ -81,6 +82,7 @@ void MemoryChunk::SetReadAndExecutable() {
 }
 
 void MemoryChunk::SetCodeModificationPermissions() {
+  DCHECK(!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT);
   DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
   DCHECK(owner_identity() == CODE_SPACE || owner_identity() == CODE_LO_SPACE);
   // Incrementing the write_unprotect_counter_ and changing the page
@@ -114,8 +116,12 @@ void MemoryChunk::SetDefaultCodePermissions() {
 namespace {
 
 PageAllocator::Permission DefaultWritableCodePermissions() {
-  return FLAG_jitless ? PageAllocator::kReadWrite
-                      : PageAllocator::kReadWriteExecute;
+  DCHECK(!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT);
+  // On MacOS on ARM64 RWX permissions are allowed to be set only when
+  // fast W^X is enabled (see V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT).
+  return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || FLAG_jitless
+             ? PageAllocator::kReadWrite
+             : PageAllocator::kReadWriteExecute;
 }
 
 }  // namespace
@@ -162,7 +168,7 @@ MemoryChunk::MemoryChunk(Heap* heap, BaseSpace* space, size_t chunk_size,
     if (heap->write_protect_code_memory()) {
       write_unprotect_counter_ =
           heap->code_space_memory_modification_scope_depth();
-    } else {
+    } else if (!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
       size_t page_size = MemoryAllocator::GetCommitPageSize();
       DCHECK(IsAligned(area_start_, page_size));
       size_t area_size = RoundUp(area_end_ - area_start_, page_size);

View File

@@ -188,8 +188,12 @@ class MemoryChunk : public BasicMemoryChunk {
   void InitializationMemoryFence();
 
   static PageAllocator::Permission GetCodeModificationPermission() {
-    return FLAG_write_code_using_rwx ? PageAllocator::kReadWriteExecute
-                                     : PageAllocator::kReadWrite;
+    DCHECK(!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT);
+    // On MacOS on ARM64 RWX permissions are allowed to be set only when
+    // fast W^X is enabled (see V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT).
+    return !V8_HAS_PTHREAD_JIT_WRITE_PROTECT && FLAG_write_code_using_rwx
+               ? PageAllocator::kReadWriteExecute
+               : PageAllocator::kReadWrite;
   }
 
   V8_EXPORT_PRIVATE void SetReadable();

View File

@@ -210,6 +210,9 @@ size_t PagedSpace::CommittedPhysicalMemory() const {
     DCHECK_EQ(0, committed_physical_memory());
     return CommittedMemory();
   }
+  CodePageHeaderModificationScope rwx_write_scope(
+      "Updating high water mark for Code pages requires write access to "
+      "the Code page headers");
   BasicMemoryChunk::UpdateHighWaterMark(allocation_info_->top());
   return committed_physical_memory();
 }

View File

@@ -51,6 +51,7 @@ struct PtrComprCageReservationParams
         RoundUp(size_t{1} << kPageSizeBits, page_allocator->AllocatePageSize());
     requested_start_hint =
         reinterpret_cast<Address>(page_allocator->GetRandomMmapAddr());
+    jit = JitPermission::kNoJit;
   }
 };
 #endif  // V8_COMPRESS_POINTERS

View File

@@ -253,8 +253,9 @@ VirtualMemory::VirtualMemory(v8::PageAllocator* page_allocator, size_t size,
   size_t page_size = page_allocator_->AllocatePageSize();
   alignment = RoundUp(alignment, page_size);
   PageAllocator::Permission permissions =
-      jit == kMapAsJittable ? PageAllocator::kNoAccessWillJitLater
-                            : PageAllocator::kNoAccess;
+      jit == JitPermission::kMapAsJittable
+          ? PageAllocator::kNoAccessWillJitLater
+          : PageAllocator::kNoAccess;
   Address address = reinterpret_cast<Address>(AllocatePages(
       page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));
   if (address != kNullAddress) {
@@ -395,7 +396,7 @@ bool VirtualMemoryCage::InitReservation(
                  RoundUp(params.base_alignment, allocate_page_size));
     VirtualMemory reservation(params.page_allocator, params.reservation_size,
                               reinterpret_cast<void*>(hint),
-                              params.base_alignment);
+                              params.base_alignment, params.jit);
     if (!reservation.IsReserved()) return false;
 
     reservation_ = std::move(reservation);
@@ -413,9 +414,9 @@ bool VirtualMemoryCage::InitReservation(
     for (int attempt = 0; attempt < kMaxAttempts; ++attempt) {
       // Reserve a region of twice the size so that there is an aligned address
      // within it that's usable as the cage base.
-      VirtualMemory padded_reservation(params.page_allocator,
-                                       params.reservation_size * 2,
-                                       reinterpret_cast<void*>(hint));
+      VirtualMemory padded_reservation(
+          params.page_allocator, params.reservation_size * 2,
+          reinterpret_cast<void*>(hint), 1, params.jit);
       if (!padded_reservation.IsReserved()) return false;
 
       // Find properly aligned sub-region inside the reservation.
@@ -449,9 +450,9 @@ bool VirtualMemoryCage::InitReservation(
       // of reserved address space regions.
       padded_reservation.Free();
 
-      VirtualMemory reservation(params.page_allocator,
-                                params.reservation_size,
-                                reinterpret_cast<void*>(address));
+      VirtualMemory reservation(
+          params.page_allocator, params.reservation_size,
+          reinterpret_cast<void*>(address), 1, params.jit);
       if (!reservation.IsReserved()) return false;
 
       // The reservation could still be somewhere else but we can accept it
@@ -476,11 +477,21 @@ bool VirtualMemoryCage::InitReservation(
                                         params.base_bias_size,
                                     params.page_size);
   size_ = allocatable_base + allocatable_size - base_;
+
+  const base::PageFreeingMode page_freeing_mode =
+      V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT &&
+              params.jit == JitPermission::kMapAsJittable
+          // On MacOS on ARM64 ("Apple M1"/Apple Silicon) setting permission to
+          // none might fail if the pages were allocated with RWX permissions,
+          // so use kDiscard mode instead.
+          ? base::PageFreeingMode::kDiscard
+          : base::PageFreeingMode::kMakeInaccessible;
+
   page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
       params.page_allocator, allocatable_base, allocatable_size,
       params.page_size,
       base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
-      base::PageFreeingMode::kMakeInaccessible);
+      page_freeing_mode);
 
   return true;
 }

View File

@@ -188,11 +188,13 @@ inline bool SetPermissions(v8::PageAllocator* page_allocator, Address address,
 // could be released, false otherwise.
 V8_EXPORT_PRIVATE bool OnCriticalMemoryPressure(size_t length);
 
+// Defines whether the address space reservation is going to be used for
+// allocating executable pages.
+enum class JitPermission { kNoJit, kMapAsJittable };
+
 // Represents and controls an area of reserved memory.
 class VirtualMemory final {
  public:
-  enum JitPermission { kNoJit, kMapAsJittable };
-
   // Empty VirtualMemory object, controlling no reserved memory.
   V8_EXPORT_PRIVATE VirtualMemory();
@@ -205,7 +207,7 @@ class VirtualMemory final {
   // This may not be at the position returned by address().
   V8_EXPORT_PRIVATE VirtualMemory(v8::PageAllocator* page_allocator,
                                   size_t size, void* hint, size_t alignment = 1,
-                                  JitPermission jit = kNoJit);
+                                  JitPermission jit = JitPermission::kNoJit);
 
   // Construct a virtual memory by assigning it some already mapped address
   // and size.
@@ -388,6 +390,7 @@ class VirtualMemoryCage {
     size_t base_bias_size;
     size_t page_size;
     Address requested_start_hint;
+    JitPermission jit;
 
     static constexpr size_t kAnyBaseAlignment = 1;
   };

View File

@@ -1999,7 +1999,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   // will have to determine whether we set kMapAsJittable or not.
   DCHECK(!FLAG_jitless);
   VirtualMemory mem(page_allocator, size, hint, allocate_page_size,
-                    VirtualMemory::kMapAsJittable);
+                    JitPermission::kMapAsJittable);
   if (!mem.IsReserved()) return {};
   TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
              mem.end(), mem.size());

View File

@@ -452,6 +452,8 @@ class ConcurrentRecordRelocSlotThread final : public v8::base::Thread {
   void Run() override {
     LocalHeap local_heap(heap_, ThreadKind::kBackground);
     UnparkedScope unparked_scope(&local_heap);
+    RwxMemoryWriteScope rwx_write_scope(
+        "Modification of Code object requires write access");
     int mode_mask = RelocInfo::EmbeddedObjectModeMask();
     for (RelocIterator it(code_, mode_mask); !it.done(); it.next()) {
       DCHECK(RelocInfo::IsEmbeddedObjectMode(it.rinfo()->rmode()));

View File

@@ -170,14 +170,32 @@ TEST(MemoryChunk) {
   // With CodeRange.
   const size_t code_range_size = 32 * MB;
   VirtualMemory code_range_reservation(page_allocator, code_range_size,
-                                       nullptr, MemoryChunk::kAlignment);
+                                       nullptr, MemoryChunk::kAlignment,
+                                       JitPermission::kMapAsJittable);
+
+  base::PageFreeingMode page_freeing_mode =
+      base::PageFreeingMode::kMakeInaccessible;
+
+  // On MacOS on ARM64 the code range reservation must be committed as RWX.
+  if (V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT) {
+    page_freeing_mode = base::PageFreeingMode::kDiscard;
+    void* base = reinterpret_cast<void*>(code_range_reservation.address());
+    CHECK(page_allocator->SetPermissions(base, code_range_size,
+                                         PageAllocator::kReadWriteExecute));
+    CHECK(page_allocator->DiscardSystemPages(base, code_range_size));
+  }
+
   CHECK(code_range_reservation.IsReserved());
 
   base::BoundedPageAllocator code_page_allocator(
       page_allocator, code_range_reservation.address(),
       code_range_reservation.size(), MemoryChunk::kAlignment,
       base::PageInitializationMode::kAllocatedPagesCanBeUninitialized,
-      base::PageFreeingMode::kMakeInaccessible);
+      page_freeing_mode);
+
+  RwxMemoryWriteScope rwx_write_scope(
+      "Modification of pages in code_range_reservation requires write "
+      "access");
 
   VerifyMemoryChunk(isolate, heap, &code_page_allocator, area_size,
                     EXECUTABLE, PageSize::kLarge, heap->code_lo_space());

View File

@@ -123,7 +123,7 @@ static void InitializeVM() {
   HandleScope scope(isolate);                                                 \
   CHECK_NOT_NULL(isolate);                                                    \
   auto owned_buf =                                                            \
-      AllocateAssemblerBuffer(buf_size, nullptr, VirtualMemory::kNoJit);      \
+      AllocateAssemblerBuffer(buf_size, nullptr, JitPermission::kNoJit);      \
   MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,        \
                       ExternalAssemblerBuffer(owned_buf->start(), buf_size)); \
   std::optional<AssemblerBufferWriteScope> rw_buffer_scope;                   \

View File

@@ -185,7 +185,7 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
 
   for (int i = 0; i < kNumIterations; ++i) {
     auto buffer = AllocateAssemblerBuffer(kBufferSize, nullptr,
-                                          VirtualMemory::kMapAsJittable);
+                                          JitPermission::kMapAsJittable);
 
     // Allow calling the function from C++.
     auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());

View File

@@ -249,7 +249,7 @@ TEST(JumpTablePatchingStress) {
   STATIC_ASSERT(kAssemblerBufferSize >= kJumpTableSize);
 
   auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize, nullptr,
-                                        VirtualMemory::kMapAsJittable);
+                                        JitPermission::kMapAsJittable);
   byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
 
   std::bitset<kAvailableBufferSlots> used_thunk_slots;

View File

@ -16,9 +16,9 @@ namespace internal {
class TestingAssemblerBuffer : public AssemblerBuffer { class TestingAssemblerBuffer : public AssemblerBuffer {
public: public:
TestingAssemblerBuffer( TestingAssemblerBuffer(size_t requested, void* address,
size_t requested, void* address, JitPermission jit_permission = JitPermission::kNoJit)
VirtualMemory::JitPermission jit_permission = VirtualMemory::kNoJit) { : protection_reconfiguration_is_allowed_(true) {
size_t page_size = v8::internal::AllocatePageSize(); size_t page_size = v8::internal::AllocatePageSize();
size_t alloc_size = RoundUp(requested, page_size); size_t alloc_size = RoundUp(requested, page_size);
CHECK_GE(kMaxInt, alloc_size); CHECK_GE(kMaxInt, alloc_size);
@ -52,25 +52,35 @@ class TestingAssemblerBuffer : public AssemblerBuffer {
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157 // See https://bugs.chromium.org/p/v8/issues/detail?id=8157
FlushInstructionCache(start(), size()); FlushInstructionCache(start(), size());
bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(), if (protection_reconfiguration_is_allowed_) {
v8::PageAllocator::kReadExecute); bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
CHECK(result); v8::PageAllocator::kReadExecute);
CHECK(result);
}
} }
void MakeWritable() { void MakeWritable() {
bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(), if (protection_reconfiguration_is_allowed_) {
v8::PageAllocator::kReadWrite); bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
CHECK(result); v8::PageAllocator::kReadWrite);
CHECK(result);
}
} }
void MakeWritableAndExecutable() { void MakeWritableAndExecutable() {
bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(), bool result = SetPermissions(GetPlatformPageAllocator(), start(), size(),
v8::PageAllocator::kReadWriteExecute); v8::PageAllocator::kReadWriteExecute);
CHECK(result); CHECK(result);
// Once buffer protection is set to RWX it might not be allowed to be
// changed anymore.
protection_reconfiguration_is_allowed_ =
!V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT &&
protection_reconfiguration_is_allowed_;
} }
private: private:
VirtualMemory reservation_; VirtualMemory reservation_;
bool protection_reconfiguration_is_allowed_;
}; };
// This scope class is mostly necesasry for arm64 tests running on Apple Silicon // This scope class is mostly necesasry for arm64 tests running on Apple Silicon
@@ -101,7 +111,7 @@ class V8_NODISCARD AssemblerBufferWriteScope final {
 static inline std::unique_ptr<TestingAssemblerBuffer> AllocateAssemblerBuffer(
     size_t requested = v8::internal::AssemblerBase::kDefaultBufferSize,
     void* address = nullptr,
-    VirtualMemory::JitPermission jit_permission = VirtualMemory::kNoJit) {
+    JitPermission jit_permission = JitPermission::kMapAsJittable) {
   return std::make_unique<TestingAssemblerBuffer>(requested, address,
                                                   jit_permission);
 }
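For background, the V8_HEAP_USE_PTHREAD_JIT_WRITE_PROTECT path mentioned in MakeWritableAndExecutable() above corresponds to Apple's per-thread JIT write protection. The following sketch is not part of this CL and uses no V8 helpers; it assumes macOS 11+ on arm64 and only illustrates why an RWX MAP_JIT mapping never needs (and may not permit) further permission changes: writability is toggled per thread instead of via SetPermissions().

// Standalone illustration (assumes macOS 11+ on arm64); not V8 code.
#include <libkern/OSCacheControl.h>  // sys_icache_invalidate
#include <pthread.h>                 // pthread_jit_write_protect_np
#include <sys/mman.h>

#include <cstdint>
#include <cstring>

int main() {
  const size_t size = 16384;
  // RWX is only permitted together with MAP_JIT on Apple Silicon, and the
  // protection of such a mapping cannot be reconfigured later.
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) return 1;

  pthread_jit_write_protect_np(0);   // this thread: writable, not executable
  const uint32_t kRet = 0xd65f03c0;  // arm64 "ret" instruction
  std::memcpy(mem, &kRet, sizeof(kRet));
  pthread_jit_write_protect_np(1);   // this thread: executable, not writable
  sys_icache_invalidate(mem, sizeof(kRet));

  reinterpret_cast<void (*)()>(mem)();  // run the generated stub
  return munmap(mem, size);
}

Under that model the TestingAssemblerBuffer above simply skips SetPermissions() once the buffer has been made RWX, which is exactly what protection_reconfiguration_is_allowed_ tracks.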


@@ -3,6 +3,7 @@
 // found in the LICENSE file.

 #include <map>
+#include <optional>

 #include "src/base/region-allocator.h"
 #include "src/execution/isolate.h"
@@ -55,7 +56,8 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
     CHECK(region_allocator_.AllocateRegionAt(current_page, size));
     Address end = current_page + size;
     while (current_page < end) {
-      page_permissions_.insert({current_page, access});
+      PageState state{access, access != kNoAccess};
+      page_permissions_.insert({current_page, state});
       current_page += commit_page_size_;
     }
   }
@@ -99,19 +101,32 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
   bool RecommitPages(void* address, size_t size,
                      PageAllocator::Permission access) override {
-    UNREACHABLE();
+    bool result = page_allocator_->RecommitPages(address, size, access);
+    if (result) {
+      // Check that given range had given access permissions.
+      CheckPagePermissions(reinterpret_cast<Address>(address), size, access,
+                           {});
+      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access,
+                            true);
+    }
+    return result;
   }

   bool DiscardSystemPages(void* address, size_t size) override {
-    UNREACHABLE();
+    bool result = page_allocator_->DiscardSystemPages(address, size);
+    if (result) {
+      UpdatePagePermissions(reinterpret_cast<Address>(address), size, {},
+                            false);
+    }
+    return result;
   }

   bool DecommitPages(void* address, size_t size) override {
     bool result = page_allocator_->DecommitPages(address, size);
     if (result) {
       // Mark pages as non-accessible.
-      UpdatePagePermissions(reinterpret_cast<Address>(address), size,
-                            kNoAccess);
+      UpdatePagePermissions(reinterpret_cast<Address>(address), size, kNoAccess,
+                            false);
     }
     return result;
   }
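A reduced, hypothetical model of the bookkeeping the TrackingPageAllocator now performs (the field names mirror the diff; everything else is simplified): discarding drops the page contents but keeps the recorded access, while decommitting additionally resets the access to kNoAccess.

// Simplified stand-in for the tracking logic; not the real allocator.
#include <cstdint>
#include <iostream>
#include <map>

enum class Permission { kNoAccess, kReadWrite, kReadWriteExecute };

struct PageState {
  Permission access;  // last requested protection
  bool committed;     // false once the page contents have been dropped
};

int main() {
  constexpr uintptr_t kPageSize = 16384;
  std::map<uintptr_t, PageState> page_permissions;

  // Two RWX, committed pages (as after a successful SetPermissions/Recommit).
  page_permissions[0] = {Permission::kReadWriteExecute, true};
  page_permissions[kPageSize] = {Permission::kReadWriteExecute, true};

  // DiscardSystemPages: contents gone, permissions untouched.
  page_permissions[0].committed = false;

  // DecommitPages: contents gone and the page reads as kNoAccess again.
  page_permissions[kPageSize] = {Permission::kNoAccess, false};

  for (const auto& [addr, state] : page_permissions) {
    std::cout << "page " << addr
              << ": access=" << static_cast<int>(state.access)
              << " committed=" << state.committed << "\n";
  }
  return 0;
}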
@@ -120,7 +135,9 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
                       PageAllocator::Permission access) override {
     bool result = page_allocator_->SetPermissions(address, size, access);
     if (result) {
-      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access);
+      bool committed = access != kNoAccess && access != kNoAccessWillJitLater;
+      UpdatePagePermissions(reinterpret_cast<Address>(address), size, access,
+                            committed);
     }
     return result;
   }
@@ -135,9 +152,15 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
   }

   void CheckPagePermissions(Address address, size_t size,
-                            PageAllocator::Permission access) {
+                            PageAllocator::Permission access,
+                            std::optional<bool> committed = {true}) {
+    CHECK_IMPLIES(committed.has_value() && committed.value(),
+                  access != PageAllocator::kNoAccess);
     ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
-      EXPECT_EQ(access, value->second);
+      if (committed.has_value()) {
+        EXPECT_EQ(committed.value(), value->second.committed);
+      }
+      EXPECT_EQ(access, value->second.access);
     });
   }
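The default argument {true} above is an engaged std::optional, so by default callers assert both the expected access and that the pages are committed; passing {} skips the committed check entirely. A small hypothetical illustration of that pattern, not taken from V8:

#include <cassert>
#include <optional>

void CheckCommitted(bool actual, std::optional<bool> expected = {true}) {
  // An engaged optional asserts a specific value; an empty one checks nothing.
  if (expected.has_value()) assert(actual == expected.value());
}

int main() {
  CheckCommitted(true);          // default {true}: must be committed
  CheckCommitted(false, {});     // empty optional: committed state is ignored
  CheckCommitted(false, false);  // explicit false: must be uncommitted
  return 0;
}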
@@ -160,32 +183,40 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
     Address contiguous_region_end = contiguous_region_start;
     PageAllocator::Permission contiguous_region_access =
         PageAllocator::kNoAccess;
+    bool contiguous_region_access_committed = false;
     for (auto& pair : page_permissions_) {
       if (contiguous_region_end == pair.first &&
-          pair.second == contiguous_region_access) {
+          pair.second.access == contiguous_region_access &&
+          pair.second.committed == contiguous_region_access_committed) {
         contiguous_region_end += commit_page_size_;
         continue;
       }
       if (contiguous_region_start != contiguous_region_end) {
         PrintRegion(os, contiguous_region_start, contiguous_region_end,
-                    contiguous_region_access);
+                    contiguous_region_access,
+                    contiguous_region_access_committed);
       }
       contiguous_region_start = pair.first;
       contiguous_region_end = pair.first + commit_page_size_;
-      contiguous_region_access = pair.second;
+      contiguous_region_access = pair.second.access;
+      contiguous_region_access_committed = pair.second.committed;
     }
     if (contiguous_region_start != contiguous_region_end) {
       PrintRegion(os, contiguous_region_start, contiguous_region_end,
-                  contiguous_region_access);
+                  contiguous_region_access, contiguous_region_access_committed);
     }
   }

  private:
-  using PagePermissionsMap = std::map<Address, PageAllocator::Permission>;
+  struct PageState {
+    PageAllocator::Permission access;
+    bool committed;
+  };
+  using PagePermissionsMap = std::map<Address, PageState>;
   using ForEachFn = std::function<void(PagePermissionsMap::value_type*)>;

   static void PrintRegion(std::ostream& os, Address start, Address end,
-                          PageAllocator::Permission access) {
+                          PageAllocator::Permission access, bool committed) {
     os << " page: [" << start << ", " << end << "), access: ";
     switch (access) {
       case PageAllocator::kNoAccess:
@@ -205,7 +236,7 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
         os << "RX";
         break;
     }
-    os << "\n";
+    os << ", committed: " << static_cast<int>(committed) << "\n";
   }

   void ForEachPage(Address address, size_t size, const ForEachFn& fn) {
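The printing loop above coalesces adjacent pages with identical state into half-open [start, end) regions before emitting them. A standalone sketch of that run-length grouping over a sorted map (hypothetical types, same control flow):

#include <cstdint>
#include <iostream>
#include <map>

int main() {
  constexpr uintptr_t kPage = 16384;
  // Page address -> state; an int stands in for {access, committed}.
  std::map<uintptr_t, int> pages = {{0 * kPage, 1}, {1 * kPage, 1},
                                    {2 * kPage, 2}, {3 * kPage, 2},
                                    {5 * kPage, 2}};

  uintptr_t run_start = 0, run_end = 0;
  int run_state = 0;
  for (const auto& [addr, state] : pages) {
    if (addr == run_end && state == run_state) {
      run_end += kPage;  // page extends the current run
      continue;
    }
    if (run_start != run_end) {  // flush the previous run
      std::cout << "[" << run_start << ", " << run_end << "): state "
                << run_state << "\n";
    }
    run_start = addr;
    run_end = addr + kPage;
    run_state = state;
  }
  if (run_start != run_end) {  // flush the final run
    std::cout << "[" << run_start << ", " << run_end << "): state "
              << run_state << "\n";
  }
  return 0;
}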
@@ -227,9 +258,13 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
   }

   void UpdatePagePermissions(Address address, size_t size,
-                             PageAllocator::Permission access) {
+                             std::optional<PageAllocator::Permission> access,
+                             bool committed) {
     ForEachPage(address, size, [=](PagePermissionsMap::value_type* value) {
-      value->second = access;
+      if (access.has_value()) {
+        value->second.access = access.value();
+      }
+      value->second.committed = committed;
     });
   }
@@ -357,15 +392,15 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
   tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
                                                   PageAllocator::kReadWrite);
   unmapper()->FreeQueuedChunks();
-  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
-                                                  PageAllocator::kNoAccess);
+  tracking_page_allocator()->CheckPagePermissions(
+      page->address(), page_size, PageAllocator::kNoAccess, false);
   unmapper()->TearDown();
 #ifdef V8_COMPRESS_POINTERS
   // In this mode Isolate uses bounded page allocator which allocates pages
   // inside prereserved region. Thus these pages are kept reserved until
   // the Isolate dies.
-  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
-                                                  PageAllocator::kNoAccess);
+  tracking_page_allocator()->CheckPagePermissions(
+      page->address(), page_size, PageAllocator::kNoAccess, false);
 #else
   tracking_page_allocator()->CheckIsFree(page->address(), page_size);
 #endif  // V8_COMPRESS_POINTERS
@@ -391,8 +426,8 @@ TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
   // In this mode Isolate uses bounded page allocator which allocates pages
   // inside prereserved region. Thus these pages are kept reserved until
   // the Isolate dies.
-  tracking_page_allocator()->CheckPagePermissions(page->address(), page_size,
-                                                  PageAllocator::kNoAccess);
+  tracking_page_allocator()->CheckPagePermissions(
+      page->address(), page_size, PageAllocator::kNoAccess, false);
 #else
   tracking_page_allocator()->CheckIsFree(page->address(), page_size);
 #endif  // V8_COMPRESS_POINTERS