Reland "[wasm] Move kMaxWasmCodeSpaceSize to wasm directory"
This is a reland of c2ea20473d
Original change's description:
> [wasm] Move kMaxWasmCodeSpaceSize to wasm directory
>
> This limit is wasm-internal, and does not need to be exposed via
> src/common/globals.h.
> This CL moves it into the {WasmCodeAllocator}.
>
> Drive-by: Minor simplification in jump table stress test.
>
> R=ecmziegler@chromium.org
>
> Change-Id: Iff8c4657697ae98123d840a022c5b21c4948fcdf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375189
> Reviewed-by: Emanuel Ziegler <ecmziegler@chromium.org>
> Commit-Queue: Clemens Backes <clemensb@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69558}
Change-Id: I6e0432d14d23978dea599233e620e84d8255caf9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375388
Reviewed-by: Emanuel Ziegler <ecmziegler@chromium.org>
Commit-Queue: Clemens Backes <clemensb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69560}
parent dc3f30ef2e
commit 8860c5f1c5
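The pattern the CL applies, as a minimal sketch (illustrative only: the MB definition and the ClampReservation helper below are stand-ins, not the real V8 sources):

#include <algorithm>
#include <cstddef>

constexpr size_t MB = 1024 * 1024;  // V8 defines MB in src/common/globals.h.

// The limit now lives on the class that owns wasm code-space reservations,
// instead of being a process-wide constant in globals.h.
class WasmCodeAllocator {
 public:
#if V8_TARGET_ARCH_ARM64
  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
#else
  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
#endif
};

// Before C++17, a static constexpr member that is ODR-used (here: bound to
// std::min's const& parameters) still needs an out-of-line definition, which
// is why the .cc hunk below adds one.
constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;

// Callers now name the owning class (hypothetical helper, not the real
// ReservationSize from the diff):
size_t ClampReservation(size_t reserve_size) {
  return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
}

Scoping the constant to the allocator keeps the target-specific code-space concern inside src/wasm/ rather than in the process-wide globals header.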
@@ -186,18 +186,9 @@ constexpr int kElidedFrameSlots = 0;
 #endif
 
 constexpr int kDoubleSizeLog2 = 3;
 
 constexpr size_t kMaxWasmCodeMB = 2048;
 constexpr size_t kMaxWasmCodeMemory = kMaxWasmCodeMB * MB;
-#if V8_TARGET_ARCH_ARM64
-// ARM64 only supports direct calls within a 128 MB range.
-constexpr size_t kMaxWasmCodeSpaceSize = 128 * MB;
-#else
-// Use 1024 MB limit for code spaces on other platforms. This is smaller than
-// the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily big
-// reservations, and to ensure that distances within a code space fit within a
-// 32-bit signed integer.
-constexpr size_t kMaxWasmCodeSpaceSize = 1024 * MB;
-#endif
 
 #if V8_HOST_ARCH_64_BIT
 constexpr int kSystemPointerSizeLog2 = 3;
@@ -495,6 +495,9 @@ void WasmCodeAllocator::OptionalLock::Lock(WasmCodeAllocator* allocator) {
   allocator->mutex_.Lock();
 }
 
+// static
+constexpr size_t WasmCodeAllocator::kMaxCodeSpaceSize;
+
 WasmCodeAllocator::WasmCodeAllocator(WasmCodeManager* code_manager,
                                      VirtualMemory code_space,
                                      std::shared_ptr<Counters> async_counters)
@@ -598,7 +601,7 @@ size_t ReservationSize(size_t code_size_estimate, int num_declared_functions,
                        total_reserved / 4));
 
   // Limit by the maximum supported code space size.
-  return std::min(kMaxWasmCodeSpaceSize, reserve_size);
+  return std::min(WasmCodeAllocator::kMaxCodeSpaceSize, reserve_size);
 }
 
 }  // namespace
@@ -1419,7 +1422,7 @@ NativeModule::JumpTablesRef NativeModule::FindJumpTablesForRegion(
     size_t max_distance = std::max(
         code_region.end() > table_start ? code_region.end() - table_start : 0,
         table_end > code_region.begin() ? table_end - code_region.begin() : 0);
-    return max_distance < kMaxWasmCodeSpaceSize;
+    return max_distance < WasmCodeAllocator::kMaxCodeSpaceSize;
   };
 
   // Fast path: Try to use {main_jump_table_} and {main_far_jump_table_}.
@@ -366,6 +366,17 @@ const char* GetWasmCodeKindAsString(WasmCode::Kind);
 // Manages the code reservations and allocations of a single {NativeModule}.
 class WasmCodeAllocator {
  public:
+#if V8_TARGET_ARCH_ARM64
+  // ARM64 only supports direct calls within a 128 MB range.
+  static constexpr size_t kMaxCodeSpaceSize = 128 * MB;
+#else
+  // Use 1024 MB limit for code spaces on other platforms. This is smaller than
+  // the total allowed code space (kMaxWasmCodeMemory) to avoid unnecessarily
+  // big reservations, and to ensure that distances within a code space fit
+  // within a 32-bit signed integer.
+  static constexpr size_t kMaxCodeSpaceSize = 1024 * MB;
+#endif
+
   // {OptionalLock} is passed between {WasmCodeAllocator} and {NativeModule} to
   // indicate that the lock on the {WasmCodeAllocator} is already taken. It's
   // optional to allow to also call methods without holding the lock.
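Background on the two values in this header block (standard ISA facts, not part of the CL): ARM64's direct branch instructions (B/BL) encode a 26-bit signed word offset, i.e. at most +/- 2^25 instructions of 4 bytes each = +/- 2^27 bytes = 128 MB, so keeping a code space within 128 MB guarantees that any address in it is reachable by a direct call. On other platforms the 1024 MB cap keeps every intra-space distance comfortably below the 2^31 - 1 bytes (about 2048 MB) representable in a 32-bit signed offset.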
@@ -36,12 +36,20 @@ constexpr uint32_t kJumpTableSize =
 constexpr size_t kThunkBufferSize = 4 * KB;
 
 #if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
+// We need the branches (from CompileJumpTableThunk) to be within near-call
+// range of the jump table slots. The address hint to AllocateAssemblerBuffer
+// is not reliable enough to guarantee that we can always achieve this with
+// separate allocations, so we generate all code in a single
+// kMaxCodeMemory-sized chunk.
+constexpr size_t kAssemblerBufferSize = WasmCodeAllocator::kMaxCodeSpaceSize;
 constexpr uint32_t kAvailableBufferSlots =
-    (kMaxWasmCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
+    (WasmCodeAllocator::kMaxCodeSpaceSize - kJumpTableSize) / kThunkBufferSize;
 constexpr uint32_t kBufferSlotStartOffset =
     RoundUp<kThunkBufferSize>(kJumpTableSize);
 #else
+constexpr size_t kAssemblerBufferSize = kJumpTableSize;
 constexpr uint32_t kAvailableBufferSlots = 0;
 constexpr uint32_t kBufferSlotStartOffset = 0;
 #endif
 
 Address AllocateJumpTableThunk(
@@ -219,19 +227,9 @@ TEST(JumpTablePatchingStress) {
   constexpr int kNumberOfRunnerThreads = 5;
   constexpr int kNumberOfPatcherThreads = 3;
 
-#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_X64
-  // We need the branches (from CompileJumpTableThunk) to be within near-call
-  // range of the jump table slots. The address hint to AllocateAssemblerBuffer
-  // is not reliable enough to guarantee that we can always achieve this with
-  // separate allocations, so for Arm64 we generate all code in a single
-  // kMaxMasmCodeMemory-sized chunk.
-  STATIC_ASSERT(kMaxWasmCodeSpaceSize >= kJumpTableSize);
-  auto buffer = AllocateAssemblerBuffer(kMaxWasmCodeSpaceSize);
+  STATIC_ASSERT(kAssemblerBufferSize >= kJumpTableSize);
+  auto buffer = AllocateAssemblerBuffer(kAssemblerBufferSize);
   byte* thunk_slot_buffer = buffer->start() + kBufferSlotStartOffset;
-#else
-  auto buffer = AllocateAssemblerBuffer(kJumpTableSize);
-  byte* thunk_slot_buffer = nullptr;
-#endif
 
   std::bitset<kAvailableBufferSlots> used_thunk_slots;
   buffer->MakeWritableAndExecutable();
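How the drive-by test simplification fits together, summarized from the constants above (illustration only, not part of the diff):

// Resolved values of the test constants per platform:
// ARM64 / X64:
//   kAssemblerBufferSize   == WasmCodeAllocator::kMaxCodeSpaceSize
//   kBufferSlotStartOffset == RoundUp<kThunkBufferSize>(kJumpTableSize)
// all other targets:
//   kAssemblerBufferSize   == kJumpTableSize
//   kBufferSlotStartOffset == 0  // slots unused since kAvailableBufferSlots == 0
// Hence "buffer->start() + kBufferSlotStartOffset" is valid unconditionally and
// the per-platform #if/#else could be dropped from the test body.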