[wasm] Avoid passing nullptr to CodeSpaceWriteScope

After https://crrev.com/c/3484317, passing {nullptr} to the
{CodeSpaceWriteScope} won't work any more. Since the tests do not have a
{NativeModule} to pass instead, make them use
{pthread_jit_write_protect_np} directly.

The jump-table assembler tests have dedicated threads for writing and
executing the code, so we just switch once per thread. The icache test
switches between writing and executing, so we use a little struct for
switching.

R=jkummerow@chromium.org, tebbi@chromium.org

Bug: v8:12644, v8:11974
Change-Id: I116f3ad75454f749cdc4635802a4617ff91548b2
Cq-Include-Trybots: luci.v8.try:v8_mac_arm64_rel_ng
Cq-Include-Trybots: luci.v8.try:v8_mac_arm64_dbg_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3487995
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Jakob Kummerow <jkummerow@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79290}
This commit is contained in:
Clemens Backes 2022-02-25 17:04:17 +01:00 committed by V8 LUCI CQ
parent 2db140b513
commit 5d4acc4eea
4 changed files with 35 additions and 17 deletions

View File

@ -18,6 +18,7 @@ thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
// writable mode; only the main thread has to switch back and forth.
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: previous_native_module_(current_native_module_) {
DCHECK_NOT_NULL(native_module);
if (previous_native_module_ == native_module) return;
current_native_module_ = native_module;
if (previous_native_module_ == nullptr || SwitchingPerNativeModule()) {

View File

@ -45,7 +45,7 @@ class NativeModule;
// permissions for all code pages.
class V8_NODISCARD CodeSpaceWriteScope final {
public:
explicit V8_EXPORT_PRIVATE CodeSpaceWriteScope(NativeModule* native_module);
explicit V8_EXPORT_PRIVATE CodeSpaceWriteScope(NativeModule*);
V8_EXPORT_PRIVATE ~CodeSpaceWriteScope();
// Disable copy constructor and copy-assignment operator, since this manages

View File

@ -184,6 +184,24 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
Isolate* isolate = CcTest::i_isolate();
HandleScope handles(isolate);
// RAII scope that, on Apple Silicon (macOS + arm64), turns off the calling
// thread's JIT write protection for its lifetime so the test can write into
// RWX-mapped code memory, and re-enables execute protection on destruction.
// On every other platform the scope is a no-op. Replaces the previous use of
// {CodeSpaceWriteScope} with a nullptr {NativeModule} (see commit message).
struct V8_NODISCARD EnableWritePermissionsOnMacArm64Scope {
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
// Argument 0 == make MAP_JIT memory writable for this thread.
EnableWritePermissionsOnMacArm64Scope() { pthread_jit_write_protect_np(0); }
// Argument 1 == restore write protection so the code is executable again.
~EnableWritePermissionsOnMacArm64Scope() {
pthread_jit_write_protect_np(1);
}
#pragma clang diagnostic pop
#else
EnableWritePermissionsOnMacArm64Scope() {
// Define a constructor to avoid unused variable warnings.
}
#endif
};
for (int i = 0; i < kNumIterations; ++i) {
auto buffer = AllocateAssemblerBuffer(kBufferSize, nullptr,
VirtualMemory::kMapAsJittable);
@ -194,19 +212,13 @@ TEST(TestFlushICacheOfWritableAndExecutable) {
CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
buffer->size(), v8::PageAllocator::kReadWriteExecute));
{
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
EnableWritePermissionsOnMacArm64Scope write_scope;
FloodWithInc(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}
CHECK_EQ(23 + kNumInstr, f.Call(23)); // Call into generated code.
{
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
wasm::CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
EnableWritePermissionsOnMacArm64Scope write_scope;
FloodWithNop(isolate, buffer.get());
FlushInstructionCache(buffer->start(), buffer->size());
}

View File

@ -55,6 +55,17 @@ constexpr uint32_t kAvailableBufferSlots = 0;
constexpr uint32_t kBufferSlotStartOffset = 0;
#endif
// Grants the calling thread write access to JIT code memory. On Apple Silicon
// (macOS + arm64) this disables the thread's JIT write protection via
// pthread_jit_write_protect_np; on all other platforms it is a no-op.
// Per the commit message, the jump-table tests use dedicated threads for
// writing vs. executing code, so each writer thread switches exactly once
// and never needs to switch back.
void EnsureThreadHasWritePermissions() {
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Ignoring this warning is considered better than relying on
// __builtin_available.
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunguarded-availability-new"
// Argument 0 == make MAP_JIT memory writable for this thread.
pthread_jit_write_protect_np(0);
#pragma clang diagnostic pop
#endif
}
Address AllocateJumpTableThunk(
Address jump_target, byte* thunk_slot_buffer,
std::bitset<kAvailableBufferSlots>* used_slots,
@ -203,10 +214,7 @@ class JumpTablePatcher : public v8::base::Thread {
void Run() override {
TRACE("Patcher %p is starting ...\n", this);
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
EnsureThreadHasWritePermissions();
Address slot_address =
slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
// First, emit code to the two thunks.
@ -261,16 +269,13 @@ TEST(JumpTablePatchingStress) {
// Iterate through jump-table slots to hammer at different alignments within
// the jump-table, thereby increasing stress for variable-length ISAs.
Address slot_start = reinterpret_cast<Address>(buffer->start());
EnsureThreadHasWritePermissions();
for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
TRACE("Hammering on jump table slot #%d ...\n", slot);
uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
std::vector<Address> patcher_thunks;
{
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
// Make sure to switch memory to writable on M1 hardware.
CodeSpaceWriteScope code_space_write_scope(nullptr);
#endif
// Patch the jump table slot to jump to itself. This will later be patched
// by the patchers.
Address slot_addr =