[wasm] Remove mprotect-based code protection

With lazy compilation, we disabled mprotect-based code protection. We
currently have no users and no test coverage of that flag. Hence remove
it from the code base.

R=ahaas@chromium.org

Bug: v8:13632
Change-Id: I1e39499dfbdb896287901b97c32f00366449c466
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4114296
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85080}
Clemens Backes, 2022-12-28 12:05:07 +01:00, committed by V8 LUCI CQ
parent c5bf7a36b6
commit 8a565c39d0
6 changed files with 27 additions and 385 deletions
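For context, the removed flag enabled classic mprotect-based W^X switching: code pages were flipped to read-write-execute while a writer was active and restored to read-execute afterwards. A minimal standalone sketch of that mechanism (POSIX mprotect on a page-aligned region; the helper names are illustrative, not V8 API):

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Flip a page-aligned code region between RX and RWX, as the removed
// --wasm-write-protect-code-memory flag did via mprotect.
static void SetRegionPermissions(void* region, size_t size, int prot) {
  if (mprotect(region, size, prot) != 0) {
    std::perror("mprotect");
    std::abort();
  }
}

void MakeWritable(void* region, size_t size) {
  SetRegionPermissions(region, size, PROT_READ | PROT_WRITE | PROT_EXEC);
}

void MakeExecutable(void* region, size_t size) {
  SetRegionPermissions(region, size, PROT_READ | PROT_EXEC);
}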


@@ -1024,11 +1024,8 @@ DEFINE_INT(wasm_num_compilation_tasks, 128,
DEFINE_VALUE_IMPLICATION(single_threaded, wasm_num_compilation_tasks, 0)
DEFINE_DEBUG_BOOL(trace_wasm_native_heap, false,
"trace wasm native heap events")
DEFINE_BOOL(wasm_write_protect_code_memory, false,
"write protect code memory on the wasm native heap with mprotect")
DEFINE_BOOL(wasm_memory_protection_keys, true,
"protect wasm code memory with PKU if available (takes precedence "
"over --wasm-write-protect-code-memory)")
"protect wasm code memory with PKU if available")
DEFINE_DEBUG_BOOL(trace_wasm_serialization, false,
"trace serialization/deserialization")
DEFINE_BOOL(wasm_async_compilation, true,
@@ -2247,9 +2244,6 @@ DEFINE_PERF_PROF_BOOL(
DEFINE_NEG_IMPLICATION(perf_prof, compact_code_space)
// TODO(v8:8462) Remove implication once perf supports remapping.
DEFINE_NEG_IMPLICATION(perf_prof, write_protect_code_memory)
#if V8_ENABLE_WEBASSEMBLY
DEFINE_NEG_IMPLICATION(perf_prof, wasm_write_protect_code_memory)
#endif // V8_ENABLE_WEBASSEMBLY
// --perf-prof-unwinding-info is available only on selected architectures.
#if !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_X64 && \


@@ -12,46 +12,18 @@ namespace v8 {
namespace internal {
namespace wasm {
namespace {
// For PKU and if MAP_JIT is available, the CodeSpaceWriteScope does not
// actually make use of the supplied {NativeModule}. In fact, there are
// situations where we can't provide a specific {NativeModule} to the scope. For
// those situations, we use this dummy pointer instead.
NativeModule* GetDummyNativeModule() {
static struct alignas(NativeModule) DummyNativeModule {
char content;
} dummy_native_module;
return reinterpret_cast<NativeModule*>(&dummy_native_module);
}
} // namespace
thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
nullptr;
thread_local int CodeSpaceWriteScope::scope_depth_ = 0;
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: previous_native_module_(current_native_module_) {
if (!native_module) {
// Passing in a {nullptr} is OK if we don't use that pointer anyway.
// Internally, we need a non-nullptr though to know whether a scope is
// already open from looking at {current_native_module_}.
DCHECK(!SwitchingPerNativeModule());
native_module = GetDummyNativeModule();
}
if (previous_native_module_ == native_module) return;
current_native_module_ = native_module;
if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
SetWritable();
}
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module) {
DCHECK_LE(0, scope_depth_);
if (++scope_depth_ == 1) SetWritable();
}
CodeSpaceWriteScope::~CodeSpaceWriteScope() {
if (previous_native_module_ == current_native_module_) return;
if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {
SetExecutable();
}
current_native_module_ = previous_native_module_;
DCHECK_LT(0, scope_depth_);
if (--scope_depth_ == 0) SetExecutable();
}
#if V8_HAS_PTHREAD_JIT_WRITE_PROTECT
@@ -64,17 +36,12 @@ void CodeSpaceWriteScope::SetExecutable() {
RwxMemoryWriteScope::SetExecutable();
}
// static
bool CodeSpaceWriteScope::SwitchingPerNativeModule() { return false; }
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
// static
void CodeSpaceWriteScope::SetWritable() {
if (WasmCodeManager::MemoryProtectionKeysEnabled()) {
RwxMemoryWriteScope::SetWritable();
} else if (v8_flags.wasm_write_protect_code_memory) {
current_native_module_->AddWriter();
}
}
@@ -83,17 +50,9 @@ void CodeSpaceWriteScope::SetExecutable() {
if (WasmCodeManager::MemoryProtectionKeysEnabled()) {
DCHECK(v8_flags.wasm_memory_protection_keys);
RwxMemoryWriteScope::SetExecutable();
} else if (v8_flags.wasm_write_protect_code_memory) {
current_native_module_->RemoveWriter();
}
}
// static
bool CodeSpaceWriteScope::SwitchingPerNativeModule() {
return !WasmCodeManager::MemoryProtectionKeysEnabled() &&
v8_flags.wasm_write_protect_code_memory;
}
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
} // namespace wasm
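The new implementation above is a plain per-thread depth counter: the first scope entered on a thread switches code to writable, and leaving the outermost scope switches it back to executable, so no specific NativeModule is needed any more. A self-contained sketch of that RAII pattern, with placeholder permission switches instead of the V8-internal SetWritable/SetExecutable:

#include <cassert>

// Placeholder permission switches; in V8 these dispatch to MAP_JIT or PKU.
inline void SwitchToWritable() { /* e.g. flip thread-local write access */ }
inline void SwitchToExecutable() { /* e.g. revoke thread-local write access */ }

class CodeWriteScope {
 public:
  CodeWriteScope() {
    assert(depth_ >= 0);
    if (++depth_ == 1) SwitchToWritable();    // outermost scope on this thread
  }
  ~CodeWriteScope() {
    assert(depth_ > 0);
    if (--depth_ == 0) SwitchToExecutable();  // leaving the outermost scope
  }
  CodeWriteScope(const CodeWriteScope&) = delete;
  CodeWriteScope& operator=(const CodeWriteScope&) = delete;

  static bool IsInScope() { return depth_ != 0; }

 private:
  static thread_local int depth_;
};

thread_local int CodeWriteScope::depth_ = 0;

Nesting is cheap in this scheme: inner scopes only bump the counter, and only the outermost scope actually changes permissions.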


@@ -49,27 +49,16 @@ class V8_NODISCARD CodeSpaceWriteScope final {
CodeSpaceWriteScope(const CodeSpaceWriteScope&) = delete;
CodeSpaceWriteScope& operator=(const CodeSpaceWriteScope&) = delete;
static bool IsInScope() { return current_native_module_ != nullptr; }
static bool IsInScope() {
DCHECK_LE(0, scope_depth_);
return scope_depth_ != 0;
}
private:
// The M1 implementation knows implicitly from the {MAP_JIT} flag during
// allocation which region to switch permissions for. On non-M1 hardware
// without memory protection key support, we need the code space from the
// {NativeModule}.
static thread_local NativeModule* current_native_module_;
static thread_local int scope_depth_;
// {SetWritable} and {SetExecutable} implicitly operate on
// {current_native_module_} (for mprotect-based protection).
static void SetWritable();
static void SetExecutable();
// Returns {true} if switching permissions happens on a per-module level, and
// not globally (like for MAP_JIT and PKU).
static bool SwitchingPerNativeModule();
// Save the previous module to put it back in {current_native_module_} when
// exiting this scope.
NativeModule* const previous_native_module_;
};
} // namespace v8::internal::wasm


@@ -516,10 +516,7 @@ int WasmCode::GetSourcePositionBefore(int offset) {
}
WasmCodeAllocator::WasmCodeAllocator(std::shared_ptr<Counters> async_counters)
: protect_code_memory_(!V8_HAS_PTHREAD_JIT_WRITE_PROTECT &&
v8_flags.wasm_write_protect_code_memory &&
!WasmCodeManager::MemoryProtectionKeysEnabled()),
async_counters_(std::move(async_counters)) {
: async_counters_(std::move(async_counters)) {
owned_code_space_.reserve(4);
}
@@ -638,67 +635,6 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
return reserve_size;
}
#ifdef DEBUG
// Check postconditions when returning from this method:
// 1) {region} must be fully contained in {writable_memory_};
// 2) {writable_memory_} must be a maximally merged ordered set of disjoint
// non-empty regions.
class CheckWritableMemoryRegions {
public:
CheckWritableMemoryRegions(
std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
writable_memory,
base::AddressRegion new_region, size_t& new_writable_memory)
: writable_memory_(writable_memory),
new_region_(new_region),
new_writable_memory_(new_writable_memory),
old_writable_size_(std::accumulate(
writable_memory_.begin(), writable_memory_.end(), size_t{0},
[](size_t old, base::AddressRegion region) {
return old + region.size();
})) {}
~CheckWritableMemoryRegions() {
// {new_region} must be contained in {writable_memory_}.
DCHECK(std::any_of(
writable_memory_.begin(), writable_memory_.end(),
[this](auto region) { return region.contains(new_region_); }));
// The new total size of writable memory must have increased by
// {new_writable_memory}.
size_t total_writable_size = std::accumulate(
writable_memory_.begin(), writable_memory_.end(), size_t{0},
[](size_t old, auto region) { return old + region.size(); });
DCHECK_EQ(old_writable_size_ + new_writable_memory_, total_writable_size);
// There are no empty regions.
DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
[](auto region) { return region.is_empty(); }));
// Regions are sorted and disjoint. (std::accumulate has nodiscard on msvc
// so USE is required to prevent build failures in debug builds).
USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
Address{0}, [](Address previous_end, auto region) {
DCHECK_LT(previous_end, region.begin());
return region.end();
}));
}
private:
const std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>&
writable_memory_;
const base::AddressRegion new_region_;
const size_t& new_writable_memory_;
const size_t old_writable_size_;
};
#else // !DEBUG
class CheckWritableMemoryRegions {
public:
template <typename... Args>
explicit CheckWritableMemoryRegions(Args...) {}
};
#endif // !DEBUG
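The deleted CheckWritableMemoryRegions class is an instance of a debug-only RAII checker: capture state in the constructor, verify postconditions in the destructor, and compile to a do-nothing constructor outside DEBUG builds. A generic sketch of the idiom (illustrative, not V8 code):

#include <cassert>
#include <cstddef>
#include <set>

#ifdef DEBUG
// Records the container size on entry and verifies on scope exit that it
// grew by exactly {expected_growth} elements.
class CheckSetGrowth {
 public:
  CheckSetGrowth(const std::set<int>& set, size_t expected_growth)
      : set_(set), expected_growth_(expected_growth), old_size_(set.size()) {}
  ~CheckSetGrowth() { assert(set_.size() == old_size_ + expected_growth_); }

 private:
  const std::set<int>& set_;
  const size_t expected_growth_;
  const size_t old_size_;
};
#else  // !DEBUG
// Release builds: accept any arguments and do nothing.
class CheckSetGrowth {
 public:
  template <typename... Args>
  explicit CheckSetGrowth(Args&&...) {}
};
#endif  // !DEBUG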
// Sentinel value to be used for {AllocateForCodeInRegion} for specifying no
// restriction on the region to allocate in.
constexpr base::AddressRegion kUnrestrictedRegion{
@@ -755,9 +691,6 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
}
const Address commit_page_size = CommitPageSize();
Address commit_start = RoundUp(code_space.begin(), commit_page_size);
if (commit_start != code_space.begin()) {
MakeWritable({commit_start - commit_page_size, commit_page_size});
}
Address commit_end = RoundUp(code_space.end(), commit_page_size);
// {commit_start} will be either code_space.start or the start of the next
@@ -777,11 +710,6 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
// Committed code cannot grow bigger than maximum code space size.
DCHECK_LE(committed_code_space_.load(),
v8_flags.wasm_max_committed_code_mb * MB);
if (protect_code_memory_) {
DCHECK_LT(0, writers_count_);
InsertIntoWritableRegions({commit_start, commit_end - commit_start},
false);
}
}
DCHECK(IsAligned(code_space.begin(), kCodeAlignment));
generated_code_size_.fetch_add(code_space.size(), std::memory_order_relaxed);
@@ -791,52 +719,6 @@ base::Vector<byte> WasmCodeAllocator::AllocateForCodeInRegion(
return {reinterpret_cast<byte*>(code_space.begin()), code_space.size()};
}
// TODO(dlehmann): Ensure that {AddWriter()} is always paired up with a
// {RemoveWriter}, such that eventually the code space is write protected.
// One solution is to make the API foolproof by hiding {SetWritable()} and
// allowing change of permissions only through {CodeSpaceWriteScope}.
// TODO(dlehmann): Add tests that ensure the code space is eventually write-
// protected.
void WasmCodeAllocator::AddWriter() {
DCHECK(protect_code_memory_);
++writers_count_;
}
void WasmCodeAllocator::RemoveWriter() {
DCHECK(protect_code_memory_);
DCHECK_GT(writers_count_, 0);
if (--writers_count_ > 0) return;
// Switch all memory to non-writable.
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
for (base::AddressRegion writable : writable_memory_) {
for (base::AddressRegion split_range :
SplitRangeByReservationsIfNeeded(writable, owned_code_space_)) {
TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RX\n",
split_range.begin(), split_range.end());
CHECK(SetPermissions(page_allocator, split_range.begin(),
split_range.size(), PageAllocator::kReadExecute));
}
}
writable_memory_.clear();
}
void WasmCodeAllocator::MakeWritable(base::AddressRegion region) {
if (!protect_code_memory_) return;
DCHECK_LT(0, writers_count_);
DCHECK(!region.is_empty());
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// Align to commit page size.
size_t commit_page_size = page_allocator->CommitPageSize();
DCHECK(base::bits::IsPowerOfTwo(commit_page_size));
Address begin = RoundDown(region.begin(), commit_page_size);
Address end = RoundUp(region.end(), commit_page_size);
region = base::AddressRegion(begin, end - begin);
InsertIntoWritableRegions(region, true);
}
void WasmCodeAllocator::FreeCode(base::Vector<WasmCode* const> codes) {
// Zap code area and collect freed code regions.
DisjointAllocationPool freed_regions;
@@ -881,84 +763,6 @@ size_t WasmCodeAllocator::GetNumCodeSpaces() const {
return owned_code_space_.size();
}
void WasmCodeAllocator::InsertIntoWritableRegions(base::AddressRegion region,
bool switch_to_writable) {
size_t new_writable_memory = 0;
CheckWritableMemoryRegions check_on_return{writable_memory_, region,
new_writable_memory};
v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
// Subroutine to make a non-writable region writable (if {switch_to_writable}
// is {true}) and insert it into {writable_memory_}.
auto make_writable = [&](decltype(writable_memory_)::iterator insert_pos,
base::AddressRegion region) {
new_writable_memory += region.size();
if (switch_to_writable) {
for (base::AddressRegion split_range :
SplitRangeByReservationsIfNeeded(region, owned_code_space_)) {
TRACE_HEAP("Set 0x%" V8PRIxPTR ":0x%" V8PRIxPTR " to RWX\n",
split_range.begin(), split_range.end());
CHECK(SetPermissions(page_allocator, split_range.begin(),
split_range.size(),
PageAllocator::kReadWriteExecute));
}
}
// Insert {region} into {writable_memory_} before {insert_pos}, potentially
// merging it with the surrounding regions.
if (insert_pos != writable_memory_.begin()) {
auto previous = insert_pos;
--previous;
if (previous->end() == region.begin()) {
region = {previous->begin(), previous->size() + region.size()};
writable_memory_.erase(previous);
}
}
if (insert_pos != writable_memory_.end() &&
region.end() == insert_pos->begin()) {
region = {region.begin(), insert_pos->size() + region.size()};
insert_pos = writable_memory_.erase(insert_pos);
}
writable_memory_.insert(insert_pos, region);
};
DCHECK(!region.is_empty());
// Find a possible insertion position by identifying the first region whose
// start address is not less than that of {new_region}, then start the
// merge from the existing region before that.
auto it = writable_memory_.lower_bound(region);
if (it != writable_memory_.begin()) --it;
for (;; ++it) {
if (it == writable_memory_.end() || it->begin() >= region.end()) {
// No overlap; add before {it}.
make_writable(it, region);
return;
}
if (it->end() <= region.begin()) continue; // Continue after {it}.
base::AddressRegion overlap = it->GetOverlap(region);
DCHECK(!overlap.is_empty());
if (overlap.begin() == region.begin()) {
if (overlap.end() == region.end()) return; // Fully contained already.
// Remove overlap (which is already writable) and continue.
region = {overlap.end(), region.end() - overlap.end()};
continue;
}
if (overlap.end() == region.end()) {
// Remove overlap (which is already writable), then make the remaining
// region writable.
region = {region.begin(), overlap.begin() - region.begin()};
make_writable(it, region);
return;
}
// Split {region}, make the split writable, and continue with the rest.
base::AddressRegion split = {region.begin(),
overlap.begin() - region.begin()};
make_writable(it, split);
region = {overlap.end(), region.end() - overlap.end()};
}
}
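The removed function maintains writable_memory_ as a sorted set of disjoint, maximally merged address ranges. The core step, inserting a new range and coalescing it with adjacent neighbours, reads more easily in a simplified standalone form (half-open [begin, end) ranges in a std::map; unlike the V8 code, overlaps with existing ranges are assumed not to occur):

#include <cstdint>
#include <iterator>
#include <map>

// begin -> end, kept sorted, disjoint, and maximally merged.
using Ranges = std::map<uintptr_t, uintptr_t>;

void InsertRange(Ranges& ranges, uintptr_t begin, uintptr_t end) {
  auto next = ranges.lower_bound(begin);
  // Coalesce with the predecessor if it ends exactly where the new range begins.
  if (next != ranges.begin()) {
    auto prev = std::prev(next);
    if (prev->second == begin) {
      begin = prev->first;
      ranges.erase(prev);
    }
  }
  // Coalesce with the successor if it starts exactly where the new range ends.
  if (next != ranges.end() && next->first == end) {
    end = next->second;
    ranges.erase(next);
  }
  ranges[begin] = end;
}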
namespace {
BoundsCheckStrategy GetBoundsChecks(const WasmModule* module) {
if (!v8_flags.wasm_bounds_checks) return kNoBoundsChecks;
@@ -1558,22 +1362,6 @@ void NativeModule::PatchJumpTableLocked(const CodeSpaceData& code_space_data,
DCHECK_NOT_NULL(code_space_data.jump_table);
DCHECK_NOT_NULL(code_space_data.far_jump_table);
// Jump tables are often allocated next to each other, so we can switch
// permissions on both at the same time.
if (code_space_data.jump_table->instructions().end() ==
code_space_data.far_jump_table->instructions().begin()) {
base::Vector<uint8_t> jump_tables_space = base::VectorOf(
code_space_data.jump_table->instructions().begin(),
code_space_data.jump_table->instructions().size() +
code_space_data.far_jump_table->instructions().size());
code_allocator_.MakeWritable(AddressRegionOf(jump_tables_space));
} else {
code_allocator_.MakeWritable(
AddressRegionOf(code_space_data.jump_table->instructions()));
code_allocator_.MakeWritable(
AddressRegionOf(code_space_data.far_jump_table->instructions()));
}
DCHECK_LT(slot_index, module_->num_declared_functions);
Address jump_table_slot =
code_space_data.jump_table->instruction_start() +
@@ -1955,23 +1743,13 @@ void WasmCodeManager::Commit(base::AddressRegion region) {
break;
}
}
// Even when we employ W^X with v8_flags.wasm_write_protect_code_memory ==
// true, code pages need to be initially allocated with RWX permission because
// of concurrent compilation/execution. For this reason there is no
// distinction here based on v8_flags.wasm_write_protect_code_memory.
// TODO(dlehmann): This allocates initially as writable and executable, and
// as such is not safe-by-default. In particular, if
// {WasmCodeAllocator::SetWritable(false)} is never called afterwards (e.g.,
// because no {CodeSpaceWriteScope} is created), the writable permission is
// never withdrawn.
// One potential fix is to allocate initially with kReadExecute only, which
// forces all compilation threads to add the missing {CodeSpaceWriteScope}s
// before modification; and/or adding DCHECKs that {CodeSpaceWriteScope} is
// open when calling this method.
// Allocate with RWX permissions; this will be restricted via PKU if
// available and enabled.
PageAllocator::Permission permission = PageAllocator::kReadWriteExecute;
bool success = false;
if (MemoryProtectionKeysEnabled()) {
DCHECK(CodeSpaceWriteScope::IsInScope());
#if V8_HAS_PKU_JIT_WRITE_PROTECT
TRACE_HEAP(
"Setting rwx permissions and memory protection key for 0x%" PRIxPTR

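With the mprotect path gone, protecting the RWX-allocated code relies on memory protection keys where available: pages stay mapped read-write-execute, but are tagged with a key whose write permission is toggled per thread. A minimal Linux-only sketch of that mechanism (glibc pkey_* wrappers, kernel and hardware support assumed; this is not V8's RwxMemoryWriteScope):

#include <sys/mman.h>
#include <cstddef>
#include <cstdio>

int main() {
  const size_t kSize = 4096;
  void* code = mmap(nullptr, kSize, PROT_READ | PROT_WRITE | PROT_EXEC,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (code == MAP_FAILED) { std::perror("mmap"); return 1; }

  // Allocate a protection key that denies writes by default.
  int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
  if (pkey < 0) { std::perror("pkey_alloc"); return 1; }

  // Tag the RWX pages with the key; writes from this thread now fault.
  if (pkey_mprotect(code, kSize, PROT_READ | PROT_WRITE | PROT_EXEC, pkey)) {
    std::perror("pkey_mprotect");
    return 1;
  }

  pkey_set(pkey, 0);                            // allow writes on this thread
  static_cast<unsigned char*>(code)[0] = 0xC3;  // e.g. patch in a 'ret'
  pkey_set(pkey, PKEY_DISABLE_WRITE);           // write-protect again

  return 0;
}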

@@ -546,20 +546,6 @@ class WasmCodeAllocator {
base::Vector<byte> AllocateForCodeInRegion(NativeModule*, size_t size,
base::AddressRegion);
// Increases or decreases the {writers_count_} field. While there is at least
// one writer, it is allowed to call {MakeWritable} to make regions writable.
// When the last writer is removed, all code is switched back to
// write-protected.
// Hold the {NativeModule}'s {allocation_mutex_} when calling one of these
// methods. The methods should only be called via {CodeSpaceWriteScope}.
V8_EXPORT_PRIVATE void AddWriter();
V8_EXPORT_PRIVATE void RemoveWriter();
// Make a code region writable. Only allowed if there is at least one writer
// (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
void FreeCode(base::Vector<WasmCode* const>);
@@ -571,9 +557,6 @@ class WasmCodeAllocator {
Counters* counters() const { return async_counters_.get(); }
private:
void InsertIntoWritableRegions(base::AddressRegion region,
bool switch_to_writable);
//////////////////////////////////////////////////////////////////////////////
// These fields are protected by the mutex in {NativeModule}.
@@ -585,18 +568,9 @@ class WasmCodeAllocator {
DisjointAllocationPool freed_code_space_;
std::vector<VirtualMemory> owned_code_space_;
// The following two fields are only used if {protect_code_memory_} is true.
int writers_count_{0};
std::set<base::AddressRegion, base::AddressRegion::StartAddressLess>
writable_memory_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
// {protect_code_memory_} is true if traditional memory permission switching
// is used to protect code space. It is false if {MAP_JIT} on Mac or PKU is
// being used, or protection is completely disabled.
const bool protect_code_memory_;
std::atomic<size_t> committed_code_space_{0};
std::atomic<size_t> generated_code_size_{0};
std::atomic<size_t> freed_code_size_{0};
@@ -701,21 +675,6 @@ class V8_EXPORT_PRIVATE NativeModule final {
// to a function index.
uint32_t GetFunctionIndexFromJumpTableSlot(Address slot_address) const;
void AddWriter() {
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_allocator_.AddWriter();
}
void RemoveWriter() {
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_allocator_.RemoveWriter();
}
void MakeWritable(base::AddressRegion region) {
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_allocator_.MakeWritable(region);
}
// For cctests, where we build both WasmModule and the runtime objects
// on the fly, and bypass the instance builder pipeline.
void ReserveCodeTableForTesting(uint32_t max_functions);


@@ -21,15 +21,11 @@
#include "test/unittests/test-utils.h"
#include "testing/gmock/include/gmock/gmock-matchers.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace v8::internal::wasm {
enum MemoryProtectionMode {
kNoProtection,
kPku,
kMprotect,
kPkuWithMprotectFallback
};
const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
@@ -38,10 +34,6 @@ const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
return "NoProtection";
case kPku:
return "Pku";
case kMprotect:
return "Mprotect";
case kPkuWithMprotectFallback:
return "PkuWithMprotectFallback";
}
}
@@ -50,15 +42,10 @@ class MemoryProtectionTest : public TestWithNativeContext {
void Initialize(MemoryProtectionMode mode) {
v8_flags.wasm_lazy_compilation = false;
mode_ = mode;
bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback;
v8_flags.wasm_memory_protection_keys = enable_pku;
v8_flags.wasm_memory_protection_keys = (mode == kPku);
// The key is initially write-protected.
CHECK_IMPLIES(WasmCodeManager::HasMemoryProtectionKeySupport(),
!WasmCodeManager::MemoryProtectionKeyWritable());
bool enable_mprotect =
mode == kMprotect || mode == kPkuWithMprotectFallback;
v8_flags.wasm_write_protect_code_memory = enable_mprotect;
}
void CompileModule() {
@@ -72,11 +59,7 @@ class MemoryProtectionTest : public TestWithNativeContext {
WasmCode* code() const { return code_; }
bool code_is_protected() {
return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku() || uses_mprotect();
}
void MakeCodeWritable() {
native_module_->MakeWritable(base::AddressRegionOf(code_->instructions()));
return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku();
}
void WriteToCode() { code_->instructions()[0] = 0; }
@@ -87,28 +70,18 @@ class MemoryProtectionTest : public TestWithNativeContext {
WriteToCode();
return;
}
// Tier-up might be running and unprotecting the code region temporarily (if
// using mprotect). In that case, repeatedly write to the code region to
// make us eventually crash.
ASSERT_DEATH_IF_SUPPORTED(
do {
{
WriteToCode();
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
} while (uses_mprotect()),
},
"");
}
bool uses_mprotect() {
// M1 always uses MAP_JIT.
if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
return mode_ == kMprotect ||
(mode_ == kPkuWithMprotectFallback && !uses_pku());
}
bool uses_pku() {
// M1 always uses MAP_JIT.
if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
bool param_has_pku = mode_ == kPku || mode_ == kPkuWithMprotectFallback;
bool param_has_pku = mode_ == kPku;
return param_has_pku && WasmCodeManager::HasMemoryProtectionKeySupport();
}
@@ -157,8 +130,7 @@ std::string PrintMemoryProtectionTestParam(
}
INSTANTIATE_TEST_SUITE_P(MemoryProtection, ParameterizedMemoryProtectionTest,
::testing::Values(kNoProtection, kPku, kMprotect,
kPkuWithMprotectFallback),
::testing::Values(kNoProtection, kPku),
PrintMemoryProtectionTestParam);
TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
@@ -169,7 +141,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
TEST_P(ParameterizedMemoryProtectionTest, CodeWritableWithinScope) {
CompileModule();
CodeSpaceWriteScope write_scope(native_module());
MakeCodeWritable();
WriteToCode();
}
@@ -177,7 +148,6 @@ TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) {
CompileModule();
{
CodeSpaceWriteScope write_scope(native_module());
MakeCodeWritable();
WriteToCode();
}
AssertCodeEventuallyProtected();
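The two tests above write to code inside and outside a CodeSpaceWriteScope and expect the process to die once protection is active again. The underlying death-test idea, shown standalone against a plain mprotect-ed page rather than the V8 fixture (gtest, POSIX only):

#include <sys/mman.h>
#include <gtest/gtest.h>

TEST(WriteProtectionSketch, WriteToReadExecutePageCrashes) {
  const size_t kSize = 4096;
  void* page = mmap(nullptr, kSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  ASSERT_NE(page, MAP_FAILED);
  static_cast<unsigned char*>(page)[0] = 0;  // writable: must succeed
  // Drop the write permission; any further write should crash the process.
  ASSERT_EQ(mprotect(page, kSize, PROT_READ | PROT_EXEC), 0);
  ASSERT_DEATH_IF_SUPPORTED(static_cast<unsigned char*>(page)[0] = 0, "");
}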
@@ -264,8 +234,7 @@ std::string PrintMemoryProtectionAndSignalHandlingTestParam(
INSTANTIATE_TEST_SUITE_P(
MemoryProtection, ParameterizedMemoryProtectionTestWithSignalHandling,
::testing::Combine(::testing::Values(kNoProtection, kPku, kMprotect,
kPkuWithMprotectFallback),
::testing::Combine(::testing::Values(kNoProtection, kPku),
::testing::Bool(), ::testing::Bool()),
PrintMemoryProtectionAndSignalHandlingTestParam);
@@ -303,16 +272,12 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
// second parameter, and not a matcher as {ASSERT_DEATH}.
#if GTEST_HAS_DEATH_TEST
ASSERT_DEATH(
// The signal handler should crash, but it might "accidentally"
// succeed if tier-up is running in the background and using mprotect
// to unprotect the code for the whole process. In that case we
// repeatedly send the signal until we crash.
do {
{
base::Optional<CodeSpaceWriteScope> write_scope;
if (open_write_scope) write_scope.emplace(native_module());
pthread_kill(pthread_self(), SIGPROF);
base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
} while (uses_mprotect()), // Only loop for mprotect.
},
// Check that the subprocess tried to write, but did not succeed.
::testing::AnyOf(
// non-sanitizer builds:
@@ -340,6 +305,4 @@ TEST_P(ParameterizedMemoryProtectionTestWithSignalHandling, TestSignalHandler) {
}
#endif // V8_OS_POSIX && !V8_OS_FUCHSIA
} // namespace wasm
} // namespace internal
} // namespace v8
} // namespace v8::internal::wasm