[wasm][mac] Support w^x codespaces for Apple Silicon

Apple's upcoming arm64 devices will prevent rwx access to memory,
but in turn provide a new per-thread way to switch between write
and execute permissions. This patch puts that system to use for
the WebAssembly subsystem.
The approach relies on CodeSpaceWriteScope objects for now. That
isn't optimal for background threads (which could stay in "write"
mode permanently instead of toggling), but its simplicity makes
it a good first step.

Background:
https://developer.apple.com/documentation/apple_silicon/porting_just-in-time_compilers_to_apple_silicon

Bug: chromium:1117591
Change-Id: I3b60f0efd34c0fed924dfc71ee2c7805801c5d42
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2378307
Commit-Queue: Jakob Kummerow <jkummerow@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69791}
Author: Jakob Kummerow, 2020-09-09 19:25:41 +02:00 (committed by Commit Bot)
Commit: 27e1ac1a79, parent: 5587838ec7
11 changed files with 142 additions and 4 deletions

BUILD.gn

@@ -3278,6 +3278,7 @@ v8_source_set("v8_base_without_compiler") {
     "src/wasm/baseline/liftoff-compiler.cc",
     "src/wasm/baseline/liftoff-compiler.h",
     "src/wasm/baseline/liftoff-register.h",
+    "src/wasm/code-space-access.h",
     "src/wasm/compilation-environment.h",
     "src/wasm/decoder.h",
     "src/wasm/function-body-decoder-impl.h",

src/base/platform/platform-posix.cc

@@ -151,6 +151,14 @@ int GetFlagsForMemoryPermission(OS::MemoryPermission access,
 #if V8_OS_QNX
     flags |= MAP_LAZY;
 #endif  // V8_OS_QNX
+#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64 && defined(MAP_JIT) && \
+    !defined(V8_OS_IOS)
+    // TODO(jkummerow): using the V8_OS_IOS define is a crude approximation
+    // of the fact that we don't want to set the MAP_JIT flag when
+    // FLAG_jitless == true, as src/base/ doesn't know any flags.
+    // TODO(crbug.com/1117591): This is only needed for code spaces.
+    flags |= MAP_JIT;
+#endif
   }
   return flags;
 }
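To make the effect of MAP_JIT concrete, here is a standalone sketch (not V8 code) of the full allocate, write, flush, execute cycle on macOS/arm64. It assumes macOS 11+ on Apple Silicon; depending on how the binary is signed, the com.apple.security.cs.allow-jit entitlement may also be required.

// Illustrative only: reserve a MAP_JIT region, toggle the calling thread to
// writable, emit two arm64 instructions, flush the icache, toggle back, call.
#include <libkern/OSCacheControl.h>  // sys_icache_invalidate
#include <pthread.h>                 // pthread_jit_write_protect_np
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const size_t size = 1 << 14;  // 16 KiB, the page size on Apple Silicon
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) {
    perror("mmap");
    return 1;
  }
  // arm64 machine code for: int f() { return 42; }
  const uint32_t code[] = {
      0xd2800540,  // mov x0, #42
      0xd65f03c0,  // ret
  };
  pthread_jit_write_protect_np(0);           // this thread: writable
  std::memcpy(mem, code, sizeof(code));
  sys_icache_invalidate(mem, sizeof(code));  // flush the instruction cache
  pthread_jit_write_protect_np(1);           // this thread: executable
  int result = reinterpret_cast<int (*)()>(mem)();
  std::printf("generated code returned %d\n", result);  // 42
  munmap(mem, size);
  return 0;
}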

src/wasm/code-space-access.h (new file)

@@ -0,0 +1,69 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_CODE_SPACE_ACCESS_H_
+#define V8_WASM_CODE_SPACE_ACCESS_H_
+
+#include "src/base/build_config.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+
+// Ignoring this warning is considered better than relying on
+// __builtin_available.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wunguarded-availability-new"
+inline void SwitchMemoryPermissionsToWritable() {
+  pthread_jit_write_protect_np(0);
+}
+inline void SwitchMemoryPermissionsToExecutable() {
+  pthread_jit_write_protect_np(1);
+}
+#pragma clang diagnostic pop
+
+namespace wasm {
+
+class CodeSpaceWriteScope {
+ public:
+  // TODO(jkummerow): Background threads could permanently stay in
+  // writable mode; only the main thread has to switch back and forth.
+  CodeSpaceWriteScope() {
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToWritable();
+    }
+    code_space_write_nesting_level_++;
+  }
+  ~CodeSpaceWriteScope() {
+    code_space_write_nesting_level_--;
+    if (code_space_write_nesting_level_ == 0) {
+      SwitchMemoryPermissionsToExecutable();
+    }
+  }
+
+ private:
+  static thread_local int code_space_write_nesting_level_;
+};
+
+#define CODE_SPACE_WRITE_SCOPE CodeSpaceWriteScope _write_access_;
+
+}  // namespace wasm
+
+#else  // Not Mac-on-arm64.
+
+// Nothing to do, we map code memory with rwx permissions.
+inline void SwitchMemoryPermissionsToWritable() {}
+inline void SwitchMemoryPermissionsToExecutable() {}
+
+#define CODE_SPACE_WRITE_SCOPE
+
+#endif  // V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_WASM_CODE_SPACE_ACCESS_H_
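For readers without an Apple Silicon machine, the scope's behavior can be reproduced with a platform-independent stand-in. This is a sketch, not V8 code: the Log* functions replace the real pthread_jit_write_protect_np() calls, and the class name is changed to make clear it is a mock.

#include <cstdio>

static thread_local int nesting_level = 0;
void LogWritable() { std::puts("thread -> writable"); }      // stand-in
void LogExecutable() { std::puts("thread -> executable"); }  // stand-in

class CodeSpaceWriteScopeSketch {
 public:
  // Only the outermost scope on a thread performs the switch.
  CodeSpaceWriteScopeSketch() {
    if (nesting_level++ == 0) LogWritable();
  }
  // The switch back happens on every exit path, including early returns.
  ~CodeSpaceWriteScopeSketch() {
    if (--nesting_level == 0) LogExecutable();
  }
};

void InstallOneCodeObject(bool bail_out) {
  CodeSpaceWriteScopeSketch scope;  // what CODE_SPACE_WRITE_SCOPE expands to
  if (bail_out) return;             // destructor still restores the state
  // ... write to the code space here ...
}

int main() {
  CodeSpaceWriteScopeSketch outer;  // prints "thread -> writable"
  InstallOneCodeObject(false);      // nested scope: no extra switch
  InstallOneCodeObject(true);       // early return: counter stays balanced
  return 0;                         // outer destructor prints "thread -> executable"
}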

src/wasm/wasm-code-manager.cc

@@ -6,6 +6,7 @@
 
 #include <iomanip>
 
+#include "src/base/build_config.h"
 #include "src/base/iterator.h"
 #include "src/base/macros.h"
 #include "src/base/platform/platform.h"
@@ -21,6 +22,7 @@
 #include "src/snapshot/embedded/embedded-data.h"
 #include "src/utils/ostreams.h"
 #include "src/utils/vector.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/compilation-environment.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/jump-table-assembler.h"
@@ -47,6 +49,10 @@ namespace wasm {
 using trap_handler::ProtectedInstructionData;
 
+#if defined(V8_OS_MACOSX) && defined(V8_HOST_ARCH_ARM64)
+thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+#endif
+
 base::AddressRegion DisjointAllocationPool::Merge(
     base::AddressRegion new_region) {
   // Find the possible insertion position by identifying the first region whose
@@ -734,6 +740,7 @@ void WasmCodeAllocator::FreeCode(Vector<WasmCode* const> codes) {
   // Zap code area and collect freed code regions.
   DisjointAllocationPool freed_regions;
   size_t code_size = 0;
+  CODE_SPACE_WRITE_SCOPE
   for (WasmCode* code : codes) {
     ZapCode(code->instruction_start(), code->instructions().size());
     FlushInstructionCache(code->instruction_start(),
@@ -866,6 +873,7 @@ CompilationEnv NativeModule::CreateCompilationEnv() const {
 }
 
 WasmCode* NativeModule::AddCodeForTesting(Handle<Code> code) {
+  CODE_SPACE_WRITE_SCOPE
   // For off-heap builtins, we create a copy of the off-heap instruction stream
   // instead of the on-heap code object containing the trampoline. Ensure that
   // we do not apply the on-heap reloc info to the off-heap instructions.
@@ -961,6 +969,7 @@ void NativeModule::UseLazyStub(uint32_t func_index) {
   if (!lazy_compile_table_) {
     uint32_t num_slots = module_->num_declared_functions;
     WasmCodeRefScope code_ref_scope;
+    CODE_SPACE_WRITE_SCOPE
     base::AddressRegion single_code_space_region;
     {
       base::MutexGuard guard(&allocation_mutex_);
@@ -1022,6 +1031,7 @@ std::unique_ptr<WasmCode> NativeModule::AddCodeWithCodeSpace(
   const int code_comments_offset = desc.code_comments_offset;
   const int instr_size = desc.instr_size;
 
+  CODE_SPACE_WRITE_SCOPE
   memcpy(dst_code_bytes.begin(), desc.buffer,
          static_cast<size_t>(desc.instr_size));
@@ -1157,6 +1167,7 @@ WasmCode* NativeModule::AddDeserializedCode(
     Vector<const byte> protected_instructions_data,
     Vector<const byte> reloc_info, Vector<const byte> source_position_table,
     WasmCode::Kind kind, ExecutionTier tier) {
+  // CodeSpaceWriteScope is provided by the caller.
   Vector<uint8_t> dst_code_bytes =
       code_allocator_.AllocateForCode(this, instructions.size());
   memcpy(dst_code_bytes.begin(), instructions.begin(), instructions.size());
@@ -1215,6 +1226,7 @@ WasmCode* NativeModule::CreateEmptyJumpTableInRegion(
   Vector<uint8_t> code_space = code_allocator_.AllocateForCodeInRegion(
       this, jump_table_size, region, allocator_lock);
   DCHECK(!code_space.empty());
+  CODE_SPACE_WRITE_SCOPE
   ZapCode(reinterpret_cast<Address>(code_space.begin()), code_space.size());
   std::unique_ptr<WasmCode> code{
       new WasmCode{this,  // native_module
@@ -1240,6 +1252,7 @@ void NativeModule::PatchJumpTablesLocked(uint32_t slot_index, Address target) {
   // The caller must hold the {allocation_mutex_}, thus we fail to lock it here.
   DCHECK(!allocation_mutex_.TryLock());
 
+  CODE_SPACE_WRITE_SCOPE
   for (auto& code_space_data : code_space_data_) {
     DCHECK_IMPLIES(code_space_data.jump_table, code_space_data.far_jump_table);
     if (!code_space_data.jump_table) continue;
@@ -1302,6 +1315,7 @@ void NativeModule::AddCodeSpace(
 #endif  // V8_OS_WIN64
 
   WasmCodeRefScope code_ref_scope;
+  CODE_SPACE_WRITE_SCOPE
   WasmCode* jump_table = nullptr;
   WasmCode* far_jump_table = nullptr;
   const uint32_t num_wasm_functions = module_->num_declared_functions;
@@ -1863,6 +1877,7 @@ std::vector<std::unique_ptr<WasmCode>> NativeModule::AddCompiledCode(
   generated_code.reserve(results.size());
 
   // Now copy the generated code into the code space and relocate it.
+  CODE_SPACE_WRITE_SCOPE
   for (auto& result : results) {
     DCHECK_EQ(result.code_desc.buffer, result.instr_buffer.get());
     size_t code_size = RoundUp<kCodeAlignment>(result.code_desc.instr_size);
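Note that several of the hunks above place CODE_SPACE_WRITE_SCOPE in front of a loop (FreeCode, PatchJumpTablesLocked, AddCompiledCode) rather than inside it, and AddDeserializedCode leaves the scope to its caller. A small platform-independent sketch (hypothetical names, a counter standing in for the real permission switch) illustrates why the nesting counter makes that placement cheap:

#include <cstdio>

static thread_local int nesting_level = 0;
static int permission_switches = 0;  // stand-in for pthread_jit_write_protect_np calls

struct WriteScopeSketch {
  WriteScopeSketch() { if (nesting_level++ == 0) ++permission_switches; }   // -> writable
  ~WriteScopeSketch() { if (--nesting_level == 0) ++permission_switches; }  // -> executable
};

void InstallCode(int /*index*/) {
  WriteScopeSketch scope;  // callee-side scope, analogous to AddCodeWithCodeSpace
  // ... copy and relocate one code object ...
}

int main() {
  constexpr int kBatch = 1000;

  for (int i = 0; i < kBatch; ++i) InstallCode(i);  // a scope per item
  std::printf("per-item scopes: %d switches\n", permission_switches);  // 2000

  permission_switches = 0;
  {
    WriteScopeSketch batch_scope;                     // one scope around the batch;
    for (int i = 0; i < kBatch; ++i) InstallCode(i);  // the inner scopes are no-ops
  }
  std::printf("one outer scope: %d switches\n", permission_switches);  // 2
  return 0;
}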

src/wasm/wasm-serialization.cc

@@ -13,6 +13,7 @@
 #include "src/utils/ostreams.h"
 #include "src/utils/utils.h"
 #include "src/utils/version.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/function-compiler.h"
 #include "src/wasm/module-compiler.h"
 #include "src/wasm/module-decoder.h"
@@ -534,6 +535,7 @@ void NativeModuleDeserializer::ReadCode(int fn_index, Reader* reader) {
   auto protected_instructions =
       reader->ReadVector<byte>(protected_instructions_size);
 
+  CODE_SPACE_WRITE_SCOPE
   WasmCode* code = native_module_->AddDeserializedCode(
       fn_index, code_buffer, stack_slot_count, tagged_parameter_slots,
       safepoint_table_offset, handler_table_offset, constant_pool_offset,

test/cctest/cctest.status

@@ -176,6 +176,13 @@
   'test-debug/DebugBreakStackTrace': [PASS, SLOW],
 }], # 'arch == arm64 and simulator_run'
 
+['arch == arm64 and system == macos and not simulator_run', {
+  # printf, being a variadic function, has a different, stack-based ABI on
+  # Apple silicon. See:
+  # https://developer.apple.com/library/archive/documentation/Xcode/Conceptual/iPhoneOSABIReference/Articles/ARM64FunctionCallingConventions.html
+  'test-assembler-arm64/printf_no_preserve': [SKIP],
+}], # arch == arm64 and system == macos and not simulator_run
+
 ##############################################################################
 ['variant == nooptimization and (arch == arm or arch == arm64) and simulator_run', {
   # Slow tests: https://crbug.com/v8/7783

test/cctest/test-assembler-arm64.cc

@@ -11720,9 +11720,9 @@ TEST(system_msr) {
   const uint64_t fpcr_core = 0x07C00000;
 
   // All FPCR fields (including fields which may be read-as-zero):
-  // Stride, Len
+  // Stride, FZ16, Len
   // IDE, IXE, UFE, OFE, DZE, IOE
-  const uint64_t fpcr_all = fpcr_core | 0x00379F00;
+  const uint64_t fpcr_all = fpcr_core | 0x003F9F00;
 
   SETUP();

test/cctest/test-code-stub-assembler.cc

@@ -41,8 +41,9 @@ template <class T>
 using TVariable = TypedCodeAssemblerVariable<T>;
 using PromiseResolvingFunctions = TorqueStructPromiseResolvingFunctions;
 
-int sum10(int a0, int a1, int a2, int a3, int a4, int a5, int a6, int a7,
-          int a8, int a9) {
+intptr_t sum10(intptr_t a0, intptr_t a1, intptr_t a2, intptr_t a3, intptr_t a4,
+               intptr_t a5, intptr_t a6, intptr_t a7, intptr_t a8,
+               intptr_t a9) {
   return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9;
 }

test/cctest/test-icache.cc

@@ -6,6 +6,7 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/execution/simulator.h"
 #include "src/handles/handles-inl.h"
+#include "src/wasm/code-space-access.h"
 #include "test/cctest/cctest.h"
 #include "test/common/assembler-tester.h"
@@ -179,11 +180,15 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
     CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                          buffer->size(), v8::PageAllocator::kReadWriteExecute));
 
+    SwitchMemoryPermissionsToWritable();
     FloodWithInc(isolate, buffer.get());
     FlushInstructionCache(buffer->start(), buffer->size());
+    SwitchMemoryPermissionsToExecutable();
     CHECK_EQ(23 + kNumInstr, f.Call(23));  // Call into generated code.
+    SwitchMemoryPermissionsToWritable();
     FloodWithNop(isolate, buffer.get());
     FlushInstructionCache(buffer->start(), buffer->size());
+    SwitchMemoryPermissionsToExecutable();
     CHECK_EQ(23, f.Call(23));  // Call into generated code.
   }
 }

test/cctest/test-jump-table-assembler.cc

@@ -8,6 +8,7 @@
 #include "src/codegen/macro-assembler-inl.h"
 #include "src/execution/simulator.h"
 #include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
 #include "src/wasm/jump-table-assembler.h"
 #include "test/cctest/cctest.h"
 #include "test/common/assembler-tester.h"
@@ -33,7 +34,12 @@ constexpr uint32_t kJumpTableSize =
     JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);
 
 // Must be a safe commit page size.
+#if V8_OS_MACOSX && V8_HOST_ARCH_ARM64
+// See kAppleArmPageSize in platform-posix.cc.
+constexpr size_t kThunkBufferSize = 1 << 14;
+#else
 constexpr size_t kThunkBufferSize = 4 * KB;
+#endif
 
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
 // We need the branches (from CompileJumpTableThunk) to be within near-call
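The 1 << 14 value matches the 16 KiB page size on Apple Silicon (x86-64 macOS and Linux use 4 KiB). A one-line way to confirm the page size on a given machine:

#include <unistd.h>
#include <cstdio>

int main() {
  // Prints 16384 on Apple Silicon Macs, 4096 on x86-64 Macs and Linux.
  std::printf("page size: %ld bytes\n", sysconf(_SC_PAGESIZE));
  return 0;
}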
@@ -162,6 +168,7 @@ class JumpTableRunner : public v8::base::Thread {
   void Run() override {
     TRACE("Runner #%d is starting ...\n", runner_id_);
+    SwitchMemoryPermissionsToExecutable();
     GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
     TRACE("Runner #%d is stopping ...\n", runner_id_);
     USE(runner_id_);
@@ -184,6 +191,7 @@ class JumpTablePatcher : public v8::base::Thread {
   void Run() override {
     TRACE("Patcher %p is starting ...\n", this);
+    SwitchMemoryPermissionsToWritable();
     Address slot_address =
         slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
     // First, emit code to the two thunks.
@@ -233,6 +241,7 @@ TEST(JumpTablePatchingStress) {
   std::bitset<kAvailableBufferSlots> used_thunk_slots;
   buffer->MakeWritableAndExecutable();
+  SwitchMemoryPermissionsToWritable();
 
   // Iterate through jump-table slots to hammer at different alignments within
   // the jump-table, thereby increasing stress for variable-length ISAs.
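The patcher/runner split in this test works because the write-protect state is per thread. The following standalone sketch (macOS/arm64 only, illustrative and not V8 code) shows one thread staying writable while a second thread executes code from the same MAP_JIT region:

#include <libkern/OSCacheControl.h>
#include <pthread.h>
#include <sys/mman.h>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <thread>

int main() {
  const size_t size = 1 << 14;
  void* mem = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  if (mem == MAP_FAILED) return 1;

  // arm64: mov x0, #7; ret
  const uint32_t code[] = {0xd28000e0, 0xd65f03c0};

  // "Patcher" role: the main thread switches to writable and stays there.
  pthread_jit_write_protect_np(0);
  std::memcpy(mem, code, sizeof(code));
  sys_icache_invalidate(mem, sizeof(code));

  // "Runner" role: a second thread switches only itself to executable and
  // calls the code; the main thread's writable state is untouched.
  int result = 0;
  std::thread runner([&] {
    pthread_jit_write_protect_np(1);
    result = reinterpret_cast<int (*)()>(mem)();
  });
  runner.join();

  std::printf("runner returned %d\n", result);  // 7
  munmap(mem, size);
  return 0;
}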

test/unittests/unittests.status

@@ -17,6 +17,27 @@
   'RandomNumberGenerator.NextSampleSlowInvalidParam2': [SKIP],
 }], # system == macos and asan
 
+['system == macos and arch == arm64 and not simulator_run', {
+  # Throwing C++ exceptions doesn't work; probably because the unittests
+  # binary is built with -fno-exceptions?
+  'LanguageServerJson.LexerError': [SKIP],
+  'LanguageServerJson.ParserError': [SKIP],
+  'Torque.DoubleUnderScorePrefixIllegalForIdentifiers': [SKIP],
+  'Torque.Enums': [SKIP],
+  'Torque.ImportNonExistentFile': [SKIP],
+
+  # Test uses fancy signal handling. Needs investigation.
+  'MemoryAllocationPermissionsTest.DoTest': [SKIP],
+
+  # cppgc::internal::kGuardPageSize is smaller than kAppleArmPageSize.
+  'PageMemoryRegionTest.PlatformUsesGuardPages': [FAIL],
+
+  # Time tick resolution appears to be ~42 microseconds. Tests expect 1 us.
+  'TimeTicks.NowResolution': [FAIL],
+  'RuntimeCallStatsTest.BasicJavaScript': [SKIP],
+  'RuntimeCallStatsTest.FunctionLengthGetter': [SKIP],
+}], # system == macos and arch == arm64 and not simulator_run
+
 ##############################################################################
 ['lite_mode or variant == jitless', {
   # TODO(v8:7777): Re-enable once wasm is supported in jitless mode.