// Copyright 2018 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <bitset>

#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/utils/utils.h"
#include "src/wasm/code-space-access.h"
#include "src/wasm/jump-table-assembler.h"
#include "test/cctest/cctest.h"
#include "test/common/assembler-tester.h"

namespace v8 {
namespace internal {
namespace wasm {

#if 0
#define TRACE(...) PrintF(__VA_ARGS__)
#else
#define TRACE(...)
#endif

#define __ masm.

namespace {

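// Shared stop flag polled by the thunks generated in CompileJumpTableThunk():
// while it is zero the thunks keep jumping back into the jump table; the test
// sets it to -1 to make all runner threads return.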
static volatile int global_stop_bit = 0;

constexpr int kJumpTableSlotCount = 128;
constexpr uint32_t kJumpTableSize =
    JumpTableAssembler::SizeForNumberOfSlots(kJumpTableSlotCount);

// This must be a safe commit page size, so we pick the largest OS page size
// that V8 is known to support. Arm64 Linux can use up to 64k pages at runtime.
constexpr size_t kThunkBufferSize = 64 * KB;

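// On arm64 and x64 the jump table and the thunks share one contiguous code
// buffer: the table sits at offset 0 and the thunk slots (kThunkBufferSize
// bytes each) start at kBufferSlotStartOffset. Other architectures allocate a
// separate buffer per thunk (see AllocateJumpTableThunk below).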
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
// We need the branches (from CompileJumpTableThunk) to be within near-call
// range of the jump table slots. The address hint to AllocateAssemblerBuffer
// is not reliable enough to guarantee that we can always achieve this with
// separate allocations, so we generate all code in a single
// kMaxCodeSpaceSize-sized chunk.
constexpr size_t kAssemblerBufferSize = WasmCodeAllocator::kMaxCodeSpaceSize;
constexpr uint32_t kAvailableBufferSlots =
    (WasmCodeAllocator::kMaxCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
constexpr uint32_t kBufferSlotStartOffset =
    RoundUp<kThunkBufferSize>(kJumpTableSize);
#else
constexpr size_t kAssemblerBufferSize = kJumpTableSize;
constexpr uint32_t kAvailableBufferSlots = 0;
constexpr uint32_t kBufferSlotStartOffset = 0;
#endif

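// Returns the start address of a kThunkBufferSize-sized region in which one
// thunk can be compiled. On arm64 and x64 this picks a random, not yet used
// slot from the shared code buffer so the thunk stays within near-call range
// of the jump table; on other architectures it allocates a fresh assembler
// buffer and hands ownership to {thunk_buffers}.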
Address AllocateJumpTableThunk(
    Address jump_target, byte* thunk_slot_buffer,
    std::bitset<kAvailableBufferSlots>* used_slots,
    std::vector<std::unique_ptr<TestingAssemblerBuffer>>* thunk_buffers) {
#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
  // To guarantee that the branch range lies within the near-call range,
  // generate the thunk in the same (kMaxCodeSpaceSize-sized) buffer as the
  // jump_target itself.
  //
  // Allocate a slot that we haven't already used. This is necessary because
  // each test iteration expects to generate two unique addresses and we leave
  // each slot executable (and not writable).
  base::RandomNumberGenerator* rng =
      CcTest::i_isolate()->random_number_generator();
  // Ensure a chance of completion without too much thrashing.
  DCHECK(used_slots->count() < (used_slots->size() / 2));
  int buffer_index;
  do {
    buffer_index = rng->NextInt(kAvailableBufferSlots);
  } while (used_slots->test(buffer_index));
  used_slots->set(buffer_index);
  return reinterpret_cast<Address>(thunk_slot_buffer +
                                   buffer_index * kThunkBufferSize);

#else
  USE(thunk_slot_buffer);
  USE(used_slots);
  thunk_buffers->emplace_back(
      AllocateAssemblerBuffer(kThunkBufferSize, GetRandomMmapAddr()));
  return reinterpret_cast<Address>(thunk_buffers->back()->start());
#endif
}

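// Emits the thunk code into the {thunk} buffer: load {global_stop_bit}; if the
// stop bit is set, return to the caller, otherwise jump to {jump_target}
// (i.e. back into the jump table slot under test).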
void CompileJumpTableThunk(Address thunk, Address jump_target) {
  MacroAssembler masm(nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
                      ExternalAssemblerBuffer(reinterpret_cast<void*>(thunk),
                                              kThunkBufferSize));

  Label exit;
  Register scratch = kReturnRegister0;
  Address stop_bit_address = reinterpret_cast<Address>(&global_stop_bit);
#if V8_TARGET_ARCH_X64
  __ Move(scratch, stop_bit_address, RelocInfo::NO_INFO);
  __ testl(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ Jump(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_IA32
  __ Move(scratch, Immediate(stop_bit_address, RelocInfo::NO_INFO));
  __ test(MemOperand(scratch, 0), Immediate(1));
  __ j(not_zero, &exit);
  __ jmp(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_ARM
  __ mov(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ ldr(scratch, MemOperand(scratch, 0));
  __ tst(scratch, Operand(1));
  __ b(ne, &exit);
  __ Jump(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_ARM64
  UseScratchRegisterScope temps(&masm);
  temps.Exclude(x16);
  scratch = x16;
  __ Mov(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ Ldr(scratch, MemOperand(scratch, 0));
  __ Tbnz(scratch, 0, &exit);
  __ Mov(scratch, Immediate(jump_target, RelocInfo::NO_INFO));
  __ Br(scratch);
#elif V8_TARGET_ARCH_PPC64
  __ mov(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ LoadU64(scratch, MemOperand(scratch));
  __ cmpi(scratch, Operand::Zero());
  __ bne(&exit);
  __ mov(scratch, Operand(jump_target, RelocInfo::NO_INFO));
  __ Jump(scratch);
#elif V8_TARGET_ARCH_S390X
  __ mov(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ LoadU64(scratch, MemOperand(scratch));
  __ CmpP(scratch, Operand(0));
  __ bne(&exit);
  __ mov(scratch, Operand(jump_target, RelocInfo::NO_INFO));
  __ Jump(scratch);
#elif V8_TARGET_ARCH_MIPS64
  __ li(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ Lw(scratch, MemOperand(scratch, 0));
  __ Branch(&exit, ne, scratch, Operand(zero_reg));
  __ Jump(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_LOONG64
  __ li(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ Ld_w(scratch, MemOperand(scratch, 0));
  __ Branch(&exit, ne, scratch, Operand(zero_reg));
  __ Jump(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_MIPS
  __ li(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ lw(scratch, MemOperand(scratch, 0));
  __ Branch(&exit, ne, scratch, Operand(zero_reg));
  __ Jump(jump_target, RelocInfo::NO_INFO);
#elif V8_TARGET_ARCH_RISCV64
  __ li(scratch, Operand(stop_bit_address, RelocInfo::NO_INFO));
  __ Lw(scratch, MemOperand(scratch, 0));
  __ Branch(&exit, ne, scratch, Operand(zero_reg));
  __ Jump(jump_target, RelocInfo::NO_INFO);
#else
#error Unsupported architecture
#endif
  __ bind(&exit);
  __ Ret();

  FlushInstructionCache(thunk, kThunkBufferSize);
#if defined(V8_OS_DARWIN) && defined(V8_HOST_ARCH_ARM64)
  // MacOS on arm64 refuses {mprotect} calls to toggle permissions of RWX
  // memory. Simply do nothing here, as the space will by default be executable
  // and non-writable for the JumpTableRunner.
#else
  CHECK(SetPermissions(GetPlatformPageAllocator(), thunk, kThunkBufferSize,
                       v8::PageAllocator::kReadExecute));
#endif
}

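// A runner thread enters the jump table through {slot_address} and only
// returns once a thunk observes {global_stop_bit} being set.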
class JumpTableRunner : public v8::base::Thread {
 public:
  JumpTableRunner(Address slot_address, int runner_id)
      : Thread(Options("JumpTableRunner")),
        slot_address_(slot_address),
        runner_id_(runner_id) {}

  void Run() override {
    TRACE("Runner #%d is starting ...\n", runner_id_);
    GeneratedCode<void>::FromAddress(CcTest::i_isolate(), slot_address_).Call();
    TRACE("Runner #%d is stopping ...\n", runner_id_);
    USE(runner_id_);
  }

 private:
  Address slot_address_;
  int runner_id_;
};

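// A patcher thread compiles its two thunks and then, holding the shared
// mutex, repeatedly repatches one jump-table slot to point at either thunk.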
class JumpTablePatcher : public v8::base::Thread {
 public:
  JumpTablePatcher(Address slot_start, uint32_t slot_index, Address thunk1,
                   Address thunk2, base::Mutex* jump_table_mutex)
      : Thread(Options("JumpTablePatcher")),
        slot_start_(slot_start),
        slot_index_(slot_index),
        thunks_{thunk1, thunk2},
        jump_table_mutex_(jump_table_mutex) {}

  void Run() override {
    TRACE("Patcher %p is starting ...\n", this);
    RwxMemoryWriteScopeForTesting rwx_write_scope;
    Address slot_address =
        slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_);
    // First, emit code to the two thunks.
    for (Address thunk : thunks_) {
      CompileJumpTableThunk(thunk, slot_address);
    }
    // Then, repeatedly patch the jump table to jump to one of the two thunks.
    constexpr int kNumberOfPatchIterations = 64;
    for (int i = 0; i < kNumberOfPatchIterations; ++i) {
      TRACE("  patcher %p patch slot " V8PRIxPTR_FMT
            " to thunk #%d (" V8PRIxPTR_FMT ")\n",
            this, slot_address, i % 2, thunks_[i % 2]);
      base::MutexGuard jump_table_guard(jump_table_mutex_);
      JumpTableAssembler::PatchJumpTableSlot(
          slot_start_ + JumpTableAssembler::JumpSlotIndexToOffset(slot_index_),
          kNullAddress, thunks_[i % 2]);
    }
    TRACE("Patcher %p is stopping ...\n", this);
  }

 private:
  Address slot_start_;
  uint32_t slot_index_;
  Address thunks_[2];
  base::Mutex* jump_table_mutex_;
};

}  // namespace

// This test is intended to stress concurrent patching of jump-table slots. It
// uses the following setup:
// 1) Picks a particular slot of the jump-table. Slots are iterated over to
//    ensure multiple entries (at different offset alignments) are tested.
// 2) Starts multiple runners that spin through the above slot. The runners
//    use thunk code that will jump to the same jump-table slot repeatedly
//    until the {global_stop_bit} indicates a test-end condition.
// 3) Starts multiple patchers that repeatedly patch the jump-table slot back
//    and forth between two thunks. If there is a race then chances are high
//    that one of the runners is currently executing the jump-table slot.
TEST(JumpTablePatchingStress) {
  constexpr int kNumberOfRunnerThreads = 5;
  constexpr int kNumberOfPatcherThreads = 3;

  static_assert(kAssemblerBufferSize >= kJumpTableSize);
  auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize, nullptr,
                                        JitPermission::kMapAsJittable);
  byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;

  std::bitset<kAvailableBufferSlots> used_thunk_slots;
  buffer->MakeWritableAndExecutable();
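  // Assumed behavior: on configurations with per-thread W^X protection of RWX
  // pages (e.g. Apple Silicon), this scope enables writes to the jittable
  // buffer for the duration of the test; elsewhere it should be a no-op.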
  RwxMemoryWriteScopeForTesting rwx_write_scope;

  // Iterate through jump-table slots to hammer at different alignments within
  // the jump-table, thereby increasing stress for variable-length ISAs.
  Address slot_start = reinterpret_cast<Address>(buffer->start());
  for (int slot = 0; slot < kJumpTableSlotCount; ++slot) {
    TRACE("Hammering on jump table slot #%d ...\n", slot);
    uint32_t slot_offset = JumpTableAssembler::JumpSlotIndexToOffset(slot);
    std::vector<std::unique_ptr<TestingAssemblerBuffer>> thunk_buffers;
    std::vector<Address> patcher_thunks;
    {
      // Patch the jump table slot to jump to itself. This will later be patched
      // by the patchers.
      Address slot_addr =
          slot_start + JumpTableAssembler::JumpSlotIndexToOffset(slot);
      JumpTableAssembler::PatchJumpTableSlot(slot_addr, kNullAddress,
                                             slot_addr);
      // For each patcher, generate two thunks where this patcher can emit code
      // which finally jumps back to {slot} in the jump table.
      for (int i = 0; i < 2 * kNumberOfPatcherThreads; ++i) {
        Address thunk =
            AllocateJumpTableThunk(slot_start + slot_offset, thunk_slot_buffer,
                                   &used_thunk_slots, &thunk_buffers);
        ZapCode(thunk, kThunkBufferSize);
        patcher_thunks.push_back(thunk);
        TRACE("  generated jump thunk: " V8PRIxPTR_FMT "\n",
              patcher_thunks.back());
      }
    }

    // Start multiple runner threads that execute the jump table slot
    // concurrently.
    std::list<JumpTableRunner> runners;
    for (int runner = 0; runner < kNumberOfRunnerThreads; ++runner) {
      runners.emplace_back(slot_start + slot_offset, runner);
    }
    // Start multiple patcher threads that concurrently generate code and
    // insert jumps to it into the jump table slot.
    std::list<JumpTablePatcher> patchers;
    // Only one patcher should modify the jump table at a time.
    base::Mutex jump_table_mutex;
    for (int i = 0; i < kNumberOfPatcherThreads; ++i) {
      patchers.emplace_back(slot_start, slot, patcher_thunks[2 * i],
                            patcher_thunks[2 * i + 1], &jump_table_mutex);
    }
    global_stop_bit = 0;  // Signal runners to keep going.
    for (auto& runner : runners) CHECK(runner.Start());
    for (auto& patcher : patchers) CHECK(patcher.Start());
    for (auto& patcher : patchers) patcher.Join();
    global_stop_bit = -1;  // Signal runners to stop.
    for (auto& runner : runners) runner.Join();
  }
}

#undef __
#undef TRACE

}  // namespace wasm
}  // namespace internal
}  // namespace v8