v8/test/cctest/test-icache.cc
Igor Sheludko 449ece383b Reland "[rwx][mac] Support fast W^X permission switching on Apple Silicon (M1)"
This is a reland of commit 9d31f8663a
There were issues with --future flag implications on M1.

Original change's description:
> [rwx][mac] Support fast W^X permission switching on Apple Silicon (M1)
>
> ... for V8 code space. The feature is currently disabled.
>
> In order to use fast W^X permission switching we must allocate
> executable pages with readable writable executable permissions (RWX).
> However, MacOS on ARM64 ("Apple M1"/Apple Silicon) prohibits further
> permission changing of RWX memory pages. This means that the code page
> headers must be allocated with RWX permissions too because otherwise
> it wouldn't be possible to allocate a large code page over the freed
> regular code page and vice versa.
>
> When enabled, the new machinery works as follows:
>
> 1) when a memory region is reserved for allocating executable pages, the
>    whole region is committed with RWX permissions and then decommitted,
> 2) since reconfiguration of RWX page permissions is not allowed on
>    MacOS on ARM64 ("Apple M1"/Apple Silicon), there must be no attempts
>    to change them,
> 3) the request to set RWX permissions in the executable page region
>    just recommits the pages without changing permissions (see (1), they
>    were already allocated as RWX and then discarded),
> 4) in order to make executable pages inaccessible one must use
>    OS::DiscardSystemPages() instead of OS::DecommitPages() or
>    setting permissions to kNoAccess because the latter two are not
>    allowed by MacOS (see (2)),
> 5) since code space page headers are allocated as RWX pages it's also
>    necessary to switch between W^X modes when updating the data in the
>    page headers (i.e. when marking, updating stats, wiring pages in
>    lists, etc.). The new CodePageHeaderModificationScope class is used
>    in the respective places. On unrelated configurations it's a no-op.
>
> The fast permission switching can't be used for V8 configuration with
> enabled pointer compression and disabled external code space because
> a) the pointer compression cage has to be reserved with MAP_JIT flag
>    which is too expensive,
> b) in the case of a shared pointer compression cage, if the code range is
>    deleted while the cage is still alive, then an attempt to configure
>    permissions of pages that were previously set to RWX will fail.
>
> This CL also extends the unmapper unit tests with permissions tracking
> for discarded pages.
>
> Bug: v8:12797
> Change-Id: Idb28cbc481306477589eee9962d2e75167d87c61
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3579303
> Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> Reviewed-by: Clemens Backes <clemensb@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Commit-Queue: Igor Sheludko <ishell@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#80238}

Bug: v8:12797
Change-Id: I0fe86666f31bad37d7074e217555c95900d2afba
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3610433
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80259}
2022-04-28 14:08:11 +00:00
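At the OS level, the machinery described in points (1), (4) and (5) above maps onto macOS's MAP_JIT allocations, page discarding via madvise, and the per-thread pthread_jit_write_protect_np() toggle. Below is a minimal sketch of that pattern, assuming a macOS/arm64 target with JIT entitlements; the helper names (AllocateJitRegion, DiscardJitPages, PatchAndFlush) are illustrative only and are not V8 APIs. V8's platform layer (e.g. OS::DiscardSystemPages()) wraps these primitives behind portable interfaces; the sketch only shows the underlying calls.

#include <libkern/OSCacheControl.h>  // sys_icache_invalidate
#include <pthread.h>                 // pthread_jit_write_protect_np
#include <sys/mman.h>                // mmap, madvise, MAP_JIT

#include <cstddef>
#include <cstring>

// (1) Reserve the whole region as RWX up front. MAP_JIT is what makes the
//     per-thread W^X toggle below legal on Apple Silicon (the process also
//     needs the com.apple.security.cs.allow-jit entitlement).
void* AllocateJitRegion(size_t size) {
  void* region = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_JIT, -1, 0);
  return region == MAP_FAILED ? nullptr : region;
}

// (4) Making pages "inaccessible" is expressed as discarding their contents
//     rather than dropping permissions, since RWX MAP_JIT pages cannot be
//     reconfigured afterwards.
void DiscardJitPages(void* start, size_t size) {
  madvise(start, size, MADV_FREE);
}

// (5)-style write window: flip the calling thread to writable, patch the
//     code, flip back to executable, then flush the instruction cache.
void PatchAndFlush(void* code, const void* src, size_t size) {
  pthread_jit_write_protect_np(0);  // writable (not executable) on this thread
  memcpy(code, src, size);
  pthread_jit_write_protect_np(1);  // executable (not writable) on this thread
  sys_icache_invalidate(code, size);
}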


// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/handles/handles-inl.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/code-space-access.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {
namespace test_icache {

using F0 = int(int);

#define __ masm.

static constexpr int kNumInstr = 100;
static constexpr int kNumIterations = 5;
static constexpr int kBufferSize = 8 * KB;

static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
  MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
  __ mov(eax, Operand(esp, kSystemPointerSize));
  for (int i = 0; i < kNumInstr; ++i) {
    __ add(eax, Immediate(1));
  }
#elif V8_TARGET_ARCH_X64
  __ movl(rax, arg_reg_1);
  for (int i = 0; i < kNumInstr; ++i) {
    __ addl(rax, Immediate(1));
  }
#elif V8_TARGET_ARCH_ARM64
  __ CodeEntry();
  for (int i = 0; i < kNumInstr; ++i) {
    __ Add(x0, x0, Operand(1));
  }
#elif V8_TARGET_ARCH_ARM
  for (int i = 0; i < kNumInstr; ++i) {
    __ add(r0, r0, Operand(1));
  }
#elif V8_TARGET_ARCH_MIPS
  __ mov(v0, a0);
  for (int i = 0; i < kNumInstr; ++i) {
    __ Addu(v0, v0, Operand(1));
  }
#elif V8_TARGET_ARCH_MIPS64
  __ mov(v0, a0);
  for (int i = 0; i < kNumInstr; ++i) {
    __ Addu(v0, v0, Operand(1));
  }
#elif V8_TARGET_ARCH_LOONG64
  for (int i = 0; i < kNumInstr; ++i) {
    __ Add_w(a0, a0, Operand(1));
  }
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
  for (int i = 0; i < kNumInstr; ++i) {
    __ addi(r3, r3, Operand(1));
  }
#elif V8_TARGET_ARCH_S390
  for (int i = 0; i < kNumInstr; ++i) {
    __ agfi(r2, Operand(1));
  }
#elif V8_TARGET_ARCH_RISCV64
  for (int i = 0; i < kNumInstr; ++i) {
    __ Add32(a0, a0, Operand(1));
  }
#else
#error Unsupported architecture
#endif
  __ Ret();
  CodeDesc desc;
  masm.GetCode(isolate, &desc);
}

static void FloodWithNop(Isolate* isolate, TestingAssemblerBuffer* buffer) {
  MacroAssembler masm(isolate, CodeObjectRequired::kYes, buffer->CreateView());
#if V8_TARGET_ARCH_IA32
  __ mov(eax, Operand(esp, kSystemPointerSize));
#elif V8_TARGET_ARCH_X64
  __ movl(rax, arg_reg_1);
#elif V8_TARGET_ARCH_ARM64
  __ CodeEntry();
#elif V8_TARGET_ARCH_MIPS
  __ mov(v0, a0);
#elif V8_TARGET_ARCH_MIPS64
  __ mov(v0, a0);
#endif
  for (int i = 0; i < kNumInstr; ++i) {
    __ nop();
  }
  __ Ret();
  CodeDesc desc;
  masm.GetCode(isolate, &desc);
}

// Order of operation for this test case:
// exec -> perm(RW) -> patch -> flush -> perm(RX) -> exec
TEST(TestFlushICacheOfWritable) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  for (int i = 0; i < kNumIterations; ++i) {
    auto buffer = AllocateAssemblerBuffer(kBufferSize);

    // Allow calling the function from C++.
    auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());

    {
      AssemblerBufferWriteScope rw_buffer_scope(*buffer);
      FloodWithInc(isolate, buffer.get());
      FlushInstructionCache(buffer->start(), buffer->size());
    }
    CHECK_EQ(23 + kNumInstr, f.Call(23));  // Call into generated code.

    {
      AssemblerBufferWriteScope rw_buffer_scope(*buffer);
      FloodWithNop(isolate, buffer.get());
      FlushInstructionCache(buffer->start(), buffer->size());
    }
    CHECK_EQ(23, f.Call(23));  // Call into generated code.
  }
}

#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64
// Note that this order of operations is not supported on ARM32/64 because, on
// some older ARM32/64 kernels, a bug causes cache flush instructions to
// trigger an access error on non-writable memory.
// See https://bugs.chromium.org/p/v8/issues/detail?id=8157
//
// Also note that this requires {kBufferSize == 8 * KB} to reproduce.
//
// The order of operations in V8 is akin to {TestFlushICacheOfWritable} above.
// It is hence OK to disable the below test on some architectures. Only the
// above test case should remain enabled on all architectures.
#define CONDITIONAL_TEST DISABLED_TEST
#else
#define CONDITIONAL_TEST TEST
#endif

// Order of operation for this test case:
// exec -> perm(RW) -> patch -> perm(RX) -> flush -> exec
CONDITIONAL_TEST(TestFlushICacheOfExecutable) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  for (int i = 0; i < kNumIterations; ++i) {
    auto buffer = AllocateAssemblerBuffer(kBufferSize);

    // Allow calling the function from C++.
    auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());

    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadWrite));
    FloodWithInc(isolate, buffer.get());
    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadExecute));
    FlushInstructionCache(buffer->start(), buffer->size());
    CHECK_EQ(23 + kNumInstr, f.Call(23));  // Call into generated code.

    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadWrite));
    FloodWithNop(isolate, buffer.get());
    CHECK(SetPermissions(GetPlatformPageAllocator(), buffer->start(),
                         buffer->size(), v8::PageAllocator::kReadExecute));
    FlushInstructionCache(buffer->start(), buffer->size());
    CHECK_EQ(23, f.Call(23));  // Call into generated code.
  }
}

#undef CONDITIONAL_TEST

#if V8_ENABLE_WEBASSEMBLY
// Order of operation for this test case:
// perm(RWX) -> exec -> patch -> flush -> exec
TEST(TestFlushICacheOfWritableAndExecutable) {
  Isolate* isolate = CcTest::i_isolate();
  HandleScope handles(isolate);

  for (int i = 0; i < kNumIterations; ++i) {
    auto buffer = AllocateAssemblerBuffer(kBufferSize, nullptr,
                                          JitPermission::kMapAsJittable);

    // Allow calling the function from C++.
    auto f = GeneratedCode<F0>::FromBuffer(isolate, buffer->start());

    buffer->MakeWritableAndExecutable();

    {
      RwxMemoryWriteScopeForTesting rw_scope;
      FloodWithInc(isolate, buffer.get());
      FlushInstructionCache(buffer->start(), buffer->size());
    }
    CHECK_EQ(23 + kNumInstr, f.Call(23));  // Call into generated code.
    {
      RwxMemoryWriteScopeForTesting rw_scope;
      FloodWithNop(isolate, buffer.get());
      FlushInstructionCache(buffer->start(), buffer->size());
    }
    CHECK_EQ(23, f.Call(23));  // Call into generated code.
  }
}
#endif  // V8_ENABLE_WEBASSEMBLY

#undef __

}  // namespace test_icache
}  // namespace internal
}  // namespace v8
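
The RwxMemoryWriteScopeForTesting blocks in the last test hinge on the same per-thread write-protection toggle. Below is a minimal RAII sketch, again assuming macOS on Apple Silicon and pthread_jit_write_protect_np(); the class name JitWriteScope is hypothetical, not the V8 type, which also covers configurations without fast W^X (where such a scope degenerates to a no-op, much like the CodePageHeaderModificationScope mentioned in the commit description).

#include <pthread.h>

// Hypothetical RAII helper (not V8's implementation): the MAP_JIT region is
// writable for this thread while the scope is alive and execute-only again on
// exit. Callers must still flush the instruction cache before running the
// patched code, as the tests above do.
class JitWriteScope {
 public:
  JitWriteScope() { pthread_jit_write_protect_np(0); }   // allow writes
  ~JitWriteScope() { pthread_jit_write_protect_np(1); }  // restore W^X
  JitWriteScope(const JitWriteScope&) = delete;
  JitWriteScope& operator=(const JitWriteScope&) = delete;
};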