[wasm] Introduce a soft limit on reserved memory

Currently, wasm memory and wasm code share a single limit on the total
size of address space reservations. This can cause wasm code
reservations to fail because wasm memories have used up all available
reservation space.
This CL introduces a soft limit that is checked when allocating wasm
memory with full guard regions. If that limit is reached and the
--wasm-trap-handler-fallback flag is set, we fall back to allocation
without full guards and check against the hard limit instead. Code
reservations always check against the hard limit.

R=ahaas@chromium.org

Bug: v8:8196
Change-Id: I3fcbaeaa6f72c972d408d291af5d6b788d43151d
Reviewed-on: https://chromium-review.googlesource.com/1233614
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56028}
Author: Clemens Hammacher <clemensh@chromium.org>
Date:   2018-09-19 13:00:14 +02:00 (committed by Commit Bot)
Parent: 49ae2db3cf
Commit: 3bb5cb63da

4 changed files with 72 additions and 74 deletions
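
In outline: memory reservations with full guard regions are now charged
against a lower soft limit and can fall back, while code reservations (and
the fallback itself) are charged against the hard limit. A condensed,
standalone C++ model of that policy follows (illustrative names, sizes, and
flags; the authoritative logic is in the diffs below):

#include <cstdint>
#include <cstdio>

// Illustrative limits, mirroring the constants added in wasm-memory.cc.
enum ReservationLimit { kSoftLimit, kHardLimit };
constexpr uint64_t kSoft = 0x10100000000ULL;  // 1 TiB + 4 GiB
constexpr uint64_t kHard = 0x20000000000ULL;  // 2 TiB

uint64_t reserved = 0;  // single-threaded stand-in for the atomic counter

bool Reserve(uint64_t bytes, ReservationLimit which) {
  uint64_t cap = (which == kSoftLimit) ? kSoft : kHard;
  // Check the counter first: hard-limit users may already have pushed it
  // past the soft cap, and the subtraction must not wrap.
  if (reserved > cap || cap - reserved < bytes) return false;
  reserved += bytes;
  return true;
}

int main() {
  const uint64_t kFullGuard = 10ULL << 30;  // illustrative full-guard size
  const uint64_t kMiniGuard = 1ULL << 16;   // illustrative fallback size

  // Wasm memory: full guard regions are charged against the soft limit;
  // on failure (with --wasm-trap-handler-fallback) retry without full
  // guards against the hard limit.
  if (!Reserve(kFullGuard, kSoftLimit)) Reserve(kMiniGuard, kHardLimit);

  // Wasm code: always charged against the hard limit, so full-guard
  // memories alone can never exhaust the budget available to code.
  Reserve(1ULL << 20, kHardLimit);
  std::printf("reserved: %llu bytes\n",
              static_cast<unsigned long long>(reserved));
}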

src/wasm/wasm-code-manager.cc

@@ -817,7 +817,10 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
   size = RoundUp(size, page_allocator->AllocatePageSize());
-  if (!memory_tracker_->ReserveAddressSpace(size)) return {};
+  if (!memory_tracker_->ReserveAddressSpace(size,
+                                            WasmMemoryTracker::kHardLimit)) {
+    return {};
+  }
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
   VirtualMemory mem(page_allocator, size, hint,

src/wasm/wasm-memory.cc

@@ -27,28 +27,14 @@ void AddAllocationStatusSample(Isolate* isolate,
 }
 
 void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
-                              size_t size, bool require_full_guard_regions,
-                              void** allocation_base,
+                              size_t size, void** allocation_base,
                               size_t* allocation_length) {
   using AllocationStatus = WasmMemoryTracker::AllocationStatus;
-#if V8_TARGET_ARCH_32_BIT
-  DCHECK(!require_full_guard_regions);
+#if V8_TARGET_ARCH_64_BIT
+  bool require_full_guard_regions = true;
+#else
+  bool require_full_guard_regions = false;
 #endif
-  // We always allocate the largest possible offset into the heap, so the
-  // addressable memory after the guard page can be made inaccessible.
-  //
-  // To protect against 32-bit integer overflow issues, we also protect the 2GiB
-  // before the valid part of the memory buffer.
-  // TODO(7881): do not use static_cast<uint32_t>() here
-  *allocation_length =
-      require_full_guard_regions
-          ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
-          : RoundUp(
-                base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
-                kWasmPageSize);
-  DCHECK_GE(*allocation_length, size);
-  DCHECK_GE(*allocation_length, kWasmPageSize);
   // Let the WasmMemoryTracker know we are going to reserve a bunch of
   // address space.
   // Try up to three times; getting rid of dead JSArrayBuffer allocations might
@@ -57,17 +43,43 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
   static constexpr int kAllocationRetries = 2;
   bool did_retry = false;
   for (int trial = 0;; ++trial) {
-    if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
+    // For guard regions, we always allocate the largest possible offset into
+    // the heap, so the addressable memory after the guard page can be made
+    // inaccessible.
+    //
+    // To protect against 32-bit integer overflow issues, we also protect the
+    // 2GiB before the valid part of the memory buffer.
+    // TODO(7881): do not use static_cast<uint32_t>() here
+    *allocation_length =
+        require_full_guard_regions
+            ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
+            : RoundUp(base::bits::RoundUpToPowerOfTwo32(
+                          static_cast<uint32_t>(size)),
+                      kWasmPageSize);
+    DCHECK_GE(*allocation_length, size);
+    DCHECK_GE(*allocation_length, kWasmPageSize);
+
+    auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
+                                            : WasmMemoryTracker::kHardLimit;
+    if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;
     did_retry = true;
     // After first and second GC: retry.
     if (trial == kAllocationRetries) {
+      // If we fail to allocate guard regions and the fallback is enabled, then
+      // retry without full guard regions.
+      if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
+        require_full_guard_regions = false;
+        --trial;  // one more try.
+        continue;
+      }
       // We are over the address space limit. Fail.
       //
       // When running under the correctness fuzzer (i.e.
-      // --abort-on-stack-or-string-length-overflow is preset), we crash instead
-      // so it is not incorrectly reported as a correctness violation. See
-      // https://crbug.com/828293#c4
+      // --abort-on-stack-or-string-length-overflow is preset), we crash
+      // instead so it is not incorrectly reported as a correctness
+      // violation. See https://crbug.com/828293#c4
       if (FLAG_abort_on_stack_or_string_length_overflow) {
         FATAL("could not allocate wasm memory");
       }
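
Two details make this fallback work. First, the *allocation_length
computation moved inside the retry loop, so once require_full_guard_regions
flips to false, the next iteration recomputes the smaller,
power-of-two-rounded reservation size. Second, the {--trial} compensates for
the loop's {++trial} on {continue}, so the mini-guard configuration gets
exactly one extra reservation attempt at the final trial count rather than a
fresh GC-retry budget.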
@@ -118,6 +130,22 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                                       : AllocationStatus::kSuccess);
   return memory;
 }
+
+#if V8_TARGET_ARCH_MIPS64
+// MIPS64 has a user space of 2^40 bytes on most processors,
+// address space limits needs to be smaller.
+constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;   // 132 GiB
+constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;   // 256 GiB
+#elif V8_TARGET_ARCH_64_BIT
+// We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
+// once we fill everything up with full-sized guard regions.
+constexpr size_t kAddressSpaceSoftLimit = 0x10100000000L;  // 1 TiB + 4 GiB
+constexpr size_t kAddressSpaceHardLimit = 0x20000000000L;  // 2 TiB
+#else
+constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
+constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
+#endif
+
 }  // namespace
 
 WasmMemoryTracker::~WasmMemoryTracker() {
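
For scale: the mjsunit comments below state that the soft limit admits up to
128 fast (full-guard) memories per isolate, i.e. about 8 GiB of reserved
address space per memory (1 TiB / 128 = 8 GiB). Per the in-code comment, the
extra 4 GiB leaves room for mini-guards once full-sized guard regions have
filled the first terabyte, and the 2 TiB hard limit keeps roughly another
terabyte of headroom for the fallback and for code reservations.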
@@ -127,25 +155,14 @@ WasmMemoryTracker::~WasmMemoryTracker() {
   DCHECK_EQ(allocated_address_space_, 0u);
 }
 
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
-// Address space reservations are currently only meaningful using guard
-// regions, which is currently only supported on 64-bit systems. On other
-// platforms, we always fall back on bounds checks.
-#if V8_TARGET_ARCH_MIPS64
-  // MIPS64 has a user space of 2^40 bytes on most processors,
-  // address space limits needs to be smaller.
-  constexpr size_t kAddressSpaceLimit = 0x2100000000L;  // 132 GiB
-#elif V8_TARGET_ARCH_64_BIT
-  // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
-  // once we fill everything up with full-sized guard regions.
-  constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
-#else
-  constexpr size_t kAddressSpaceLimit = 0x90000000;  // 2 GiB + 256 MiB
-#endif
-
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
+                                            ReservationLimit limit) {
+  size_t reservation_limit =
+      limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
   while (true) {
     size_t old_count = reserved_address_space_.load();
-    if (kAddressSpaceLimit - old_count < num_bytes) return false;
+    if (old_count > reservation_limit) return false;
+    if (reservation_limit - old_count < num_bytes) return false;
     if (reserved_address_space_.compare_exchange_weak(old_count,
                                                       old_count + num_bytes)) {
       return true;
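
Two details of the new check are worth noting. The update is a lock-free
compare-and-swap loop: concurrent reservations race, and the loser re-reads
the counter and re-checks the limit. And the new {old_count >
reservation_limit} test is not redundant: after hard-limit reservations the
shared counter can legitimately sit above the soft limit, in which case the
unsigned subtraction {reservation_limit - old_count} would wrap around. A
standalone model of the same pattern (illustrative names, not V8 code):

#include <atomic>
#include <cstddef>

std::atomic<size_t> reserved_address_space{0};

bool TryReserve(size_t num_bytes, size_t reservation_limit) {
  while (true) {
    size_t old_count = reserved_address_space.load();
    // A hard-limit reservation may already have pushed the counter past a
    // soft limit; bail out first so the unsigned subtraction cannot wrap.
    if (old_count > reservation_limit) return false;
    if (reservation_limit - old_count < num_bytes) return false;
    // Publish the new total only if no other thread reserved in between;
    // on contention (or spurious CAS failure), loop and re-check.
    if (reserved_address_space.compare_exchange_weak(old_count,
                                                     old_count + num_bytes)) {
      return true;
    }
  }
}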
@@ -273,25 +290,9 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
   void* allocation_base = nullptr;
   size_t allocation_length = 0;
 
-#if V8_TARGET_ARCH_64_BIT
-  bool require_full_guard_regions = true;
-#else
-  bool require_full_guard_regions = false;
-#endif
   void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
-                                         require_full_guard_regions,
                                          &allocation_base, &allocation_length);
-  if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
-    // If we failed to allocate with full guard regions, fall back on
-    // mini-guards.
-    require_full_guard_regions = false;
-    memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
-                                     require_full_guard_regions,
-                                     &allocation_base, &allocation_length);
-  }
-  if (memory == nullptr) {
-    return {};
-  }
+  if (memory == nullptr) return {};
 
 #if DEBUG
   // Double check the API allocator actually zero-initialized the memory.

src/wasm/wasm-memory.h

@@ -30,7 +30,10 @@ class WasmMemoryTracker {
   // ReserveAddressSpace attempts to increase the reserved address space counter
   // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
   // and reserve {num_bytes} bytes), false otherwise.
-  bool ReserveAddressSpace(size_t num_bytes);
+  // Use {kSoftLimit} if you can implement a fallback which needs less reserved
+  // memory.
+  enum ReservationLimit { kSoftLimit, kHardLimit };
+  bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
 
   void RegisterAllocation(Isolate* isolate, void* allocation_base,
                           size_t allocation_length, void* buffer_start,
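
Following the {kSoftLimit} comment above, a caller with a cheaper fallback
would use the two limits roughly like this (hypothetical helper, condensed
from the TryAllocateBackingStore pattern in wasm-memory.cc; not code from
this CL):

// Hypothetical helper, not part of the CL.
bool ReserveForWasmMemory(WasmMemoryTracker* tracker, size_t full_guard_size,
                          size_t fallback_size) {
  // Full guard regions are charged against the soft limit only, keeping
  // headroom under the hard limit for code reservations.
  if (tracker->ReserveAddressSpace(full_guard_size,
                                   WasmMemoryTracker::kSoftLimit)) {
    return true;
  }
  // The fallback needs less reserved memory, so it may go up to the hard
  // limit, just like code reservations.
  return tracker->ReserveAddressSpace(fallback_size,
                                      WasmMemoryTracker::kHardLimit);
}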

test/mjsunit/wasm/trap-handler-fallback.js

@@ -33,17 +33,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
-  for (var i = 0; i < 135; i++) {
+  for (var i = 0; i < 135 && !fallback_occurred; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = builder.instantiate({mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
-    if (fallback_occurred) {
-      break;
-    }
+    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
   }
   assertTrue(fallback_occurred);
 })();
@@ -63,17 +60,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
-  for (var i = 0; i < 135; i++) {
+  for (var i = 0; i < 135 && !fallback_occurred; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = builder.instantiate({mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
-    if (fallback_occurred) {
-      break;
-    }
+    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
   }
   assertTrue(fallback_occurred);
 })();
@@ -132,17 +126,14 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
   const module = builder.toModule();
-  for (var i = 0; i < 135; i++) {
+  for (var i = 0; i < 135 && !fallback_occurred; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = new WebAssembly.Instance(module, {mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
-    if (fallback_occurred) {
-      break;
-    }
+    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
   }
   assertTrue(fallback_occurred);
 })();