Revert "[wasm] Introduce a soft limit on reserved memory"

This reverts commit 3bb5cb63da.

Reason for revert: Breaks Win64 bot https://ci.chromium.org/p/v8/builders/luci.v8.ci/V8%20Win64/26418

Original change's description:
> [wasm] Introduce a soft limit on reserved memory
> 
> Currently, wasm memory and wasm code use a shared limit for the total
> size of reservations. This can cause wasm code reservations to fail
> because wasm memories used all available reservation space.
> This CL introduces a soft limit which is used when allocating wasm
> memory with full guards. If this limit is reached and the respective
> flag is set, we fall back to allocation without full guards and check
> against the hard limit. Code reservations always check against the hard
> limit.
> 
> R=ahaas@chromium.org
> 
> Bug: v8:8196
> Change-Id: I3fcbaeaa6f72c972d408d291af5d6b788d43151d
> Reviewed-on: https://chromium-review.googlesource.com/1233614
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#56028}

TBR=ahaas@chromium.org,clemensh@chromium.org

Change-Id: If645e738b4a5800eceabd993738ac2285f4a63bc
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:8196
Reviewed-on: https://chromium-review.googlesource.com/1233834
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56031}
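For reference, the two-limit scheme described in the reverted CL can be sketched as follows. This is a hypothetical illustration with invented names and limit values, not V8's actual code: memory reservations that can fall back to bounds checks consult a lower soft limit, while code reservations, which have no fallback, go straight to the hard limit.

// Hypothetical sketch of the reverted two-limit scheme (invented names and
// values). Callers with a fallback pass kSoftLimit; callers without one
// (e.g. code space) pass kHardLimit.
#include <atomic>
#include <cstddef>

class ReservationTracker {
 public:
  enum ReservationLimit { kSoftLimit, kHardLimit };

  bool ReserveAddressSpace(size_t num_bytes, ReservationLimit which) {
    const size_t limit = which == kSoftLimit ? kSoft : kHard;
    size_t old_count = reserved_.load();
    while (true) {
      // Reject if the request does not fit under the chosen limit.
      if (old_count > limit || limit - old_count < num_bytes) return false;
      // compare_exchange_weak refreshes old_count on failure, so the next
      // iteration re-validates against the limit with the current count.
      if (reserved_.compare_exchange_weak(old_count, old_count + num_bytes)) {
        return true;
      }
    }
  }

 private:
  static constexpr size_t kSoft = size_t{1} << 40;  // 1 TiB, example value
  static constexpr size_t kHard = size_t{2} << 40;  // 2 TiB, example value
  std::atomic<size_t> reserved_{0};
};

Under that scheme a wasm memory allocation would first try ReserveAddressSpace(bytes, kSoftLimit) and, on failure, retry a smaller no-guard reservation against kHardLimit; code space would always check kHardLimit, as the first hunk below shows.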
@@ -817,10 +817,7 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
   v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
   DCHECK_GT(size, 0);
   size = RoundUp(size, page_allocator->AllocatePageSize());
-  if (!memory_tracker_->ReserveAddressSpace(size,
-                                            WasmMemoryTracker::kHardLimit)) {
-    return {};
-  }
+  if (!memory_tracker_->ReserveAddressSpace(size)) return {};
   if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
   VirtualMemory mem(page_allocator, size, hint,


@@ -27,14 +27,28 @@ void AddAllocationStatusSample(Isolate* isolate,
 }
 
 void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
-                              size_t size, void** allocation_base,
+                              size_t size, bool require_full_guard_regions,
+                              void** allocation_base,
                               size_t* allocation_length) {
   using AllocationStatus = WasmMemoryTracker::AllocationStatus;
-#if V8_TARGET_ARCH_64_BIT
-  bool require_full_guard_regions = true;
-#else
-  bool require_full_guard_regions = false;
+#if V8_TARGET_ARCH_32_BIT
+  DCHECK(!require_full_guard_regions);
 #endif
+  // We always allocate the largest possible offset into the heap, so the
+  // addressable memory after the guard page can be made inaccessible.
+  //
+  // To protect against 32-bit integer overflow issues, we also protect the 2GiB
+  // before the valid part of the memory buffer.
+  // TODO(7881): do not use static_cast<uint32_t>() here
+  *allocation_length =
+      require_full_guard_regions
+          ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
+          : RoundUp(
+                base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(size)),
+                kWasmPageSize);
+  DCHECK_GE(*allocation_length, size);
+  DCHECK_GE(*allocation_length, kWasmPageSize);
+
   // Let the WasmMemoryTracker know we are going to reserve a bunch of
   // address space.
   // Try up to three times; getting rid of dead JSArrayBuffer allocations might
@@ -43,43 +57,17 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
   static constexpr int kAllocationRetries = 2;
   bool did_retry = false;
   for (int trial = 0;; ++trial) {
-    // For guard regions, we always allocate the largest possible offset into
-    // the heap, so the addressable memory after the guard page can be made
-    // inaccessible.
-    //
-    // To protect against 32-bit integer overflow issues, we also protect the
-    // 2GiB before the valid part of the memory buffer.
-    // TODO(7881): do not use static_cast<uint32_t>() here
-    *allocation_length =
-        require_full_guard_regions
-            ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
-            : RoundUp(base::bits::RoundUpToPowerOfTwo32(
-                          static_cast<uint32_t>(size)),
-                      kWasmPageSize);
-    DCHECK_GE(*allocation_length, size);
-    DCHECK_GE(*allocation_length, kWasmPageSize);
-
-    auto limit = require_full_guard_regions ? WasmMemoryTracker::kSoftLimit
-                                            : WasmMemoryTracker::kHardLimit;
-    if (memory_tracker->ReserveAddressSpace(*allocation_length, limit)) break;
+    if (memory_tracker->ReserveAddressSpace(*allocation_length)) break;
     did_retry = true;
     // After first and second GC: retry.
     if (trial == kAllocationRetries) {
-      // If we fail to allocate guard regions and the fallback is enabled, then
-      // retry without full guard regions.
-      if (require_full_guard_regions && FLAG_wasm_trap_handler_fallback) {
-        require_full_guard_regions = false;
-        --trial;  // one more try.
-        continue;
-      }
       // We are over the address space limit. Fail.
       //
       // When running under the correctness fuzzer (i.e.
-      // --abort-on-stack-or-string-length-overflow is preset), we crash
-      // instead so it is not incorrectly reported as a correctness
-      // violation. See https://crbug.com/828293#c4
+      // --abort-on-stack-or-string-length-overflow is preset), we crash instead
+      // so it is not incorrectly reported as a correctness violation. See
+      // https://crbug.com/828293#c4
       if (FLAG_abort_on_stack_or_string_length_overflow) {
         FATAL("could not allocate wasm memory");
       }
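The loop above pairs allocation with GC pressure: dead JSArrayBuffers may still hold reserved address space, so a collection between trials can make the next reservation succeed. A self-contained sketch of that pattern, with a stubbed tracker and GC (all names and numbers invented for illustration):

#include <cstddef>
#include <cstdio>

// Stubs standing in for the real tracker and GC.
static size_t g_reserved = 0;
static constexpr size_t kLimit = 1024;

static bool TryReserve(size_t bytes) {
  if (kLimit - g_reserved < bytes) return false;
  g_reserved += bytes;
  return true;
}

static void SimulateGc() {
  // Pretend a GC released some dead backing stores.
  g_reserved /= 2;
}

// The retry pattern from the hunk above: try, GC, try again, up to a limit.
static bool ReserveWithRetries(size_t bytes) {
  static constexpr int kAllocationRetries = 2;
  for (int trial = 0;; ++trial) {
    if (TryReserve(bytes)) return true;
    if (trial == kAllocationRetries) return false;  // still over the limit
    SimulateGc();  // dead buffers may free address space for the next trial
  }
}

int main() {
  g_reserved = 1000;
  std::printf("%d\n", ReserveWithRetries(600));  // succeeds after simulated GCs
}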
@@ -130,22 +118,6 @@ void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
                            : AllocationStatus::kSuccess);
   return memory;
 }
-
-#if V8_TARGET_ARCH_MIPS64
-// MIPS64 has a user space of 2^40 bytes on most processors,
-// address space limits needs to be smaller.
-constexpr size_t kAddressSpaceSoftLimit = 0x2100000000L;   // 132 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x4000000000L;   // 256 GiB
-#elif V8_TARGET_ARCH_64_BIT
-// We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
-// once we fill everything up with full-sized guard regions.
-constexpr size_t kAddressSpaceSoftLimit = 0x10100000000L;  // 1 TiB + 4 GiB
-constexpr size_t kAddressSpaceHardLimit = 0x20000000000L;  // 2 TiB
-#else
-constexpr size_t kAddressSpaceSoftLimit = 0x90000000;  // 2 GiB + 256 MiB
-constexpr size_t kAddressSpaceHardLimit = 0xC0000000;  // 3 GiB
-#endif
 
 }  // namespace
@@ -155,14 +127,25 @@ WasmMemoryTracker::~WasmMemoryTracker() {
   DCHECK_EQ(allocated_address_space_, 0u);
 }
 
-bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes,
-                                            ReservationLimit limit) {
-  size_t reservation_limit =
-      limit == kSoftLimit ? kAddressSpaceSoftLimit : kAddressSpaceHardLimit;
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
+// Address space reservations are currently only meaningful using guard
+// regions, which is currently only supported on 64-bit systems. On other
+// platforms, we always fall back on bounds checks.
+#if V8_TARGET_ARCH_MIPS64
+  // MIPS64 has a user space of 2^40 bytes on most processors,
+  // address space limits needs to be smaller.
+  constexpr size_t kAddressSpaceLimit = 0x2100000000L;   // 132 GiB
+#elif V8_TARGET_ARCH_64_BIT
+  // We set the limit to 1 TiB + 4 GiB so that there is room for mini-guards
+  // once we fill everything up with full-sized guard regions.
+  constexpr size_t kAddressSpaceLimit = 0x10100000000L;  // 1 TiB + 4 GiB
+#else
+  constexpr size_t kAddressSpaceLimit = 0x90000000;  // 2 GiB + 256 MiB
+#endif
   while (true) {
     size_t old_count = reserved_address_space_.load();
-    if (old_count > reservation_limit) return false;
-    if (reservation_limit - old_count < num_bytes) return false;
+    if (kAddressSpaceLimit - old_count < num_bytes) return false;
     if (reserved_address_space_.compare_exchange_weak(old_count,
                                                       old_count + num_bytes)) {
       return true;
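A note on the restored bounds check: it is written in subtractive form, kAddressSpaceLimit - old_count < num_bytes, rather than the additive old_count + num_bytes > kAddressSpaceLimit, because the addition can wrap around for huge requests and falsely pass. A small standalone demonstration (values invented for illustration, the limit taken from the patch above):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kLimit = 0x10100000000;  // 1 TiB + 4 GiB, as in the patch
  uint64_t old_count = 0x100000000;           // 4 GiB already reserved
  uint64_t request = UINT64_MAX - 0x1000;     // absurdly large request

  // Additive form: old_count + request wraps around to a small value, so the
  // comparison wrongly concludes the reservation fits.
  bool additive_fits = old_count + request <= kLimit;       // true (!)

  // Subtractive form used in the code: cannot wrap while old_count stays
  // below the limit, and correctly rejects the request.
  bool subtractive_fits = !(kLimit - old_count < request);  // false

  std::printf("additive=%d subtractive=%d\n", additive_fits, subtractive_fits);
}

The reverted two-limit version additionally guarded old_count > reservation_limit first, presumably because space reserved under the hard limit can already exceed the soft limit, which would make the subtraction underflow.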
@@ -290,9 +273,25 @@ MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
   void* allocation_base = nullptr;
   size_t allocation_length = 0;
 
+#if V8_TARGET_ARCH_64_BIT
+  bool require_full_guard_regions = true;
+#else
+  bool require_full_guard_regions = false;
+#endif
   void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
+                                         require_full_guard_regions,
                                          &allocation_base, &allocation_length);
-  if (memory == nullptr) return {};
+  if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
+    // If we failed to allocate with full guard regions, fall back on
+    // mini-guards.
+    require_full_guard_regions = false;
+    memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
+                                     require_full_guard_regions,
+                                     &allocation_base, &allocation_length);
+  }
+  if (memory == nullptr) {
+    return {};
+  }
 
 #if DEBUG
   // Double check the API allocator actually zero-initialized the memory.
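The hunk above restores the pre-CL control flow in NewArrayBuffer: first attempt a full-guard-region backing store, and only if that fails with the trap-handler fallback enabled, retry with a small bounds-checked reservation. A compilable sketch with stubbed helpers (the helper names and the simulated exhaustion are invented for illustration):

#include <cstddef>
#include <cstdlib>

static bool FLAG_wasm_trap_handler_fallback = true;  // stand-in for the V8 flag

// Stand-in for TryAllocateBackingStore: pretend full guard regions no longer
// fit into the address space budget.
static void* TryAllocate(size_t size, bool require_full_guard_regions) {
  if (require_full_guard_regions) return nullptr;  // simulate exhaustion
  return std::malloc(size);
}

static void* NewBackingStore(size_t size) {
  bool require_full_guard_regions = sizeof(void*) == 8;  // 64-bit only
  void* memory = TryAllocate(size, require_full_guard_regions);
  if (memory == nullptr && FLAG_wasm_trap_handler_fallback) {
    // Fall back on mini-guards, as the restored code does.
    memory = TryAllocate(size, false);
  }
  return memory;  // may still be nullptr; the caller reports the failure
}

int main() { return NewBackingStore(1 << 16) ? 0 : 1; }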


@@ -30,10 +30,7 @@ class WasmMemoryTracker {
   // ReserveAddressSpace attempts to increase the reserved address space counter
   // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
   // and reserve {num_bytes} bytes), false otherwise.
-  // Use {kSoftLimit} if you can implement a fallback which needs less reserved
-  // memory.
-  enum ReservationLimit { kSoftLimit, kHardLimit };
-  bool ReserveAddressSpace(size_t num_bytes, ReservationLimit limit);
+  bool ReserveAddressSpace(size_t num_bytes);
 
   void RegisterAllocation(Isolate* isolate, void* allocation_base,
                           size_t allocation_length, void* buffer_start,


@@ -33,14 +33,17 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
-  for (var i = 0; i < 135 && !fallback_occurred; i++) {
+  for (var i = 0; i < 135; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = builder.instantiate({mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
+    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
+    if (fallback_occurred) {
+      break;
+    }
   }
   assertTrue(fallback_occurred);
 })();
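The test's magic numbers follow from the limit restored above. On 64-bit targets the single limit is 0x10100000000 (1 TiB + 4 GiB); at the roughly 8 GiB of address space each full-guard memory reserves (the per-memory cost implied by the comment's own figure), 0x10100000000 / 0x200000000 ≈ 128 fast memories fit before reservation fails. Looping up to 135 times therefore guarantees at least one allocation falls back to a memory without a full guard region, which assertTrue(fallback_occurred) then verifies.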
@@ -60,14 +63,17 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // space per isolate (see kAddressSpaceLimit in wasm-memory.cc), which allows
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
-  for (var i = 0; i < 135 && !fallback_occurred; i++) {
+  for (var i = 0; i < 135; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = builder.instantiate({mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
+    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
+    if (fallback_occurred) {
+      break;
+    }
   }
   assertTrue(fallback_occurred);
 })();
@@ -126,14 +132,17 @@ load("test/mjsunit/wasm/wasm-module-builder.js");
   // up to 128 fast memories. As long as we create more than that, we should
   // trigger the fallback behavior.
   const module = builder.toModule();
-  for (var i = 0; i < 135 && !fallback_occurred; i++) {
+  for (var i = 0; i < 135; i++) {
     memory = new WebAssembly.Memory({initial: 1});
     instance = new WebAssembly.Instance(module, {mod: {imported_mem: memory}});
     instances.push(instance);
     assertTraps(kTrapMemOutOfBounds, () => instance.exports.load(1 << 20));
-    fallback_occurred = !%WasmMemoryHasFullGuardRegion(memory);
+    fallback_occurred = fallback_occurred || !%WasmMemoryHasFullGuardRegion(memory);
+    if (fallback_occurred) {
+      break;
+    }
  }
   assertTrue(fallback_occurred);
 })();