Revert "[wasm] Add guard pages before Wasm Memory"

This reverts commit d7cdea6fa2.

Reason for revert: Flakiness on bots

Original change's description:
> [wasm] Add guard pages before Wasm Memory
> 
> Although Wasm memory indices are all unsigned, they sometimes get assembled
> as 32-bit signed immediates. Values in the top half of the Wasm memory space
> will then get sign extended, causing Wasm to access in front of its memory
> buffer.
> 
> Usually this region is not mapped anyway, so faults still happen as they are
> supposed to. This change protects this region with guard pages so we are
> guaranteed to always fault when this happens.
> 
> Bug: v8:5277
> Change-Id: Id791fbe2a5ac1b1d75460e65c72b5b9db2a47ee7
> Reviewed-on: https://chromium-review.googlesource.com/484747
> Commit-Queue: Eric Holk <eholk@chromium.org>
> Reviewed-by: Mircea Trofin <mtrofin@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#44905}

TBR=bradnelson@chromium.org,gdeepti@chromium.org,mtrofin@chromium.org,eholk@chromium.org,mseaborn@chromium.org,adamk@chromium.org,v8-reviews@googlegroups.com,wasm-v8@google.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Change-Id: Ia1d3e5dbf4f518815a9fd4197047077bc8e42816
Reviewed-on: https://chromium-review.googlesource.com/487828
Reviewed-by: Adam Klein <adamk@chromium.org>
Commit-Queue: Adam Klein <adamk@chromium.org>
Cr-Commit-Position: refs/heads/master@{#44907}
Eric Holk authored 2017-04-26 20:52:57 +00:00, committed by Commit Bot
parent 86aa7960cc
commit 54be464fe4
5 changed files with 9 additions and 56 deletions
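
For context, the failure mode described in the original change's description can be reproduced outside V8. The following standalone C++ sketch (not part of this CL; the index value is made up) shows how an unsigned 32-bit Wasm memory index in the upper half of the 4 GiB range turns into a negative 64-bit offset once it is treated as a 32-bit signed immediate, which is why guard pages in front of the memory were useful:

#include <cstdint>
#include <cstdio>

int main() {
  // A Wasm memory index is unsigned, but suppose the generated code treats it
  // as a 32-bit signed immediate. Indices >= 2^31 then become negative when
  // widened to 64 bits.
  uint32_t index = 0x90000000u;  // an index in the top half of the 4 GiB space

  // Two's-complement reinterpretation, then sign extension to 64 bits.
  int64_t as_signed = static_cast<int32_t>(index);
  // Correct zero extension, as Wasm semantics require.
  uint64_t as_unsigned = index;

  std::printf("sign-extended: %lld\n", static_cast<long long>(as_signed));
  std::printf("zero-extended: %llu\n", static_cast<unsigned long long>(as_unsigned));
  // With a heap base pointer, base + as_signed would land roughly 1.75 GiB
  // *before* the Wasm memory buffer instead of inside it.
  return 0;
}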


@@ -951,19 +951,6 @@ void WasmJs::Install(Isolate* isolate) {
   JSObject::AddProperty(memory_proto, factory->to_string_tag_symbol(),
                         v8_str(isolate, "WebAssembly.Memory"), ro_attributes);
 
-#if DEBUG && V8_TRAP_HANDLER_SUPPORTED
-  // Make sure the guard regions are the correct size. We do this check
-  // dynamically to avoid introducing static initializers when computing this
-  // constant.
-  //
-  // kWasmMaxHeapOffset is the largest offset that a Wasm program can compute.
-  // We round up to the page size, since this all must be done on page
-  // granularities, and then multiply by 2 because we add guard regions before
-  // and after the Wasm heap so we can protect against negative offsets.
-  DCHECK(wasm::kTotalGuardRegionSize ==
-         2 * RoundUp(wasm::kWasmMaxHeapOffset, base::OS::CommitPageSize()));
-#endif
-
   // Setup errors
   attributes = static_cast<PropertyAttributes>(DONT_ENUM);
   Handle<JSFunction> compile_error(
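
The DCHECK removed above encodes a simple size relationship. A standalone sketch of the arithmetic (the constant names are local to the sketch, and the 4 KiB page size is an assumption; V8 queries the real value at runtime via base::OS::CommitPageSize(), which is why the original check was a DCHECK rather than a static_assert):

#include <cstdint>
#include <cstdio>

constexpr uint64_t kMaxUint32 = 0xFFFFFFFFull;
constexpr uint64_t kWasmMaxHeapOffset = kMaxUint32 + kMaxUint32;  // max base + max index
constexpr uint64_t kPageSize = 4096;                              // assumed for this sketch
constexpr uint64_t kRoundedHeapOffset =
    ((kWasmMaxHeapOffset + kPageSize - 1) / kPageSize) * kPageSize;  // 2^33 = 8 GiB
constexpr uint64_t kTotalGuardRegionSize = uint64_t{1} << 34;        // 2^34 = 16 GiB

// One guard region in front of the Wasm heap and one behind it: factor of 2.
static_assert(kTotalGuardRegionSize == 2 * kRoundedHeapOffset,
              "guard regions must cover the maximum heap offset on both sides");

int main() {
  std::printf("kWasmMaxHeapOffset    = %llu\n",
              static_cast<unsigned long long>(kWasmMaxHeapOffset));
  std::printf("rounded to pages      = %llu\n",
              static_cast<unsigned long long>(kRoundedHeapOffset));
  std::printf("kTotalGuardRegionSize = %llu\n",
              static_cast<unsigned long long>(kTotalGuardRegionSize));
  return 0;
}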


@@ -44,12 +44,6 @@ constexpr uint64_t kWasmMaxHeapOffset =
         std::numeric_limits<uint32_t>::max())  // maximum base value
     + std::numeric_limits<uint32_t>::max();    // maximum index value
 
-// The total size of the memory region to allocate when using trap handlers.
-// The Wasm heap will be placed within this region, at offset
-// kLowerGuardRegionSize.
-constexpr uint64_t kTotalGuardRegionSize = (1LL << 34);  // 16GB
-constexpr uint64_t kLowerGuardRegionSize = kTotalGuardRegionSize / 2;
-
 // Limit the control stack size of the C++ wasm interpreter.
 constexpr size_t kV8MaxWasmInterpretedStackSize = 64 * 1024;
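
For a rough picture of the layout the two removed constants describe, here is an illustrative standalone sketch (the base address and heap size are made up for the example, not anything V8 actually chooses):

#include <cstdint>
#include <cstdio>

int main() {
  constexpr uint64_t kTotalGuardRegionSize = uint64_t{1} << 34;          // 16 GiB reservation
  constexpr uint64_t kLowerGuardRegionSize = kTotalGuardRegionSize / 2;  // 8 GiB in front

  const uint64_t guard_region_start = 0x200000000000ull;  // made-up base address
  const uint64_t memory_start = guard_region_start + kLowerGuardRegionSize;
  const uint64_t heap_size = 16 * 65536;                  // made-up Wasm memory size

  // The lower guard catches negative (sign-extended) offsets; the upper guard
  // catches offsets past the end of the accessible heap.
  std::printf("lower guard: [%#llx, %#llx)\n",
              static_cast<unsigned long long>(guard_region_start),
              static_cast<unsigned long long>(memory_start));
  std::printf("wasm heap:   [%#llx, %#llx)\n",
              static_cast<unsigned long long>(memory_start),
              static_cast<unsigned long long>(memory_start + heap_size));
  std::printf("upper guard: [%#llx, %#llx)\n",
              static_cast<unsigned long long>(memory_start + heap_size),
              static_cast<unsigned long long>(guard_region_start + kTotalGuardRegionSize));
  return 0;
}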


@@ -60,19 +60,13 @@ static void MemoryFinalizer(const v8::WeakCallbackInfo<void>& data) {
   JSArrayBuffer* buffer = *p;
 
   if (!buffer->was_neutered()) {
-#if V8_TARGET_ARCH_64_BIT
     void* memory = buffer->backing_store();
     DCHECK(memory != nullptr);
-    base::OS::Free(GetGuardRegionStartFromMemoryStart(memory),
-                   kTotalGuardRegionSize);
+    base::OS::Free(memory,
+                   RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize()));
 
     data.GetIsolate()->AdjustAmountOfExternalAllocatedMemory(
         -buffer->byte_length()->Number());
-#else
-    DCHECK(false &&
-           "Attempting to free guarded memory when trap handlers are not "
-           "supported");
-#endif
   }
 
   GlobalHandles::Destroy(reinterpret_cast<Object**>(p));
@@ -99,23 +93,21 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
   // things that would be unsafe if they expected guard pages where there
   // weren't any.
   if (enable_guard_regions && kGuardRegionsSupported) {
-#if V8_TARGET_ARCH_64_BIT
     // TODO(eholk): On Windows we want to make sure we don't commit the guard
     // pages yet.
 
     // We always allocate the largest possible offset into the heap, so the
     // addressable memory after the guard page can be made inaccessible.
-    const size_t alloc_size = kTotalGuardRegionSize;
+    const size_t alloc_size =
+        RoundUp(kWasmMaxHeapOffset, base::OS::CommitPageSize());
     DCHECK_EQ(0, size % base::OS::CommitPageSize());
 
     // AllocateGuarded makes the whole region inaccessible by default.
-    void* guard_region = base::OS::AllocateGuarded(alloc_size);
-    if (guard_region == nullptr) {
+    void* memory = base::OS::AllocateGuarded(alloc_size);
+    if (memory == nullptr) {
       return nullptr;
     }
 
-    void* memory = GetMemoryStartFromGuardRegionStart(guard_region);
-
     // Make the part we care about accessible.
     base::OS::Unprotect(memory, size);
@@ -124,10 +116,6 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
 
     is_external = true;
     return memory;
-#else
-    DCHECK(false && "Guard regions are not supported on this platform.");
-    return nullptr;
-#endif
   } else {
     void* memory = isolate->array_buffer_allocator()->Allocate(size);
     return memory;
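
The allocation pattern above (base::OS::AllocateGuarded returns an all-inaccessible region, then base::OS::Unprotect opens up just the first `size` bytes) can be sketched with plain POSIX calls. This is an analogy for 64-bit Linux rather than V8 code, and the reservation size is shrunk to keep the demo small:

#include <sys/mman.h>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t reservation = size_t{1} << 32;  // demo-sized reservation, not 16 GiB
  const size_t heap_size = 64 * 1024;          // the part we actually want to use

  // Reserve the whole region with no access rights: any touch faults.
  void* region = mmap(nullptr, reservation, PROT_NONE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
  if (region == MAP_FAILED) {
    std::perror("mmap");
    return 1;
  }

  // Make only the heap itself readable/writable; everything else in the
  // reservation stays protected and acts as a guard region.
  if (mprotect(region, heap_size, PROT_READ | PROT_WRITE) != 0) {
    std::perror("mprotect");
    munmap(region, reservation);
    return 1;
  }

  static_cast<uint8_t*>(region)[0] = 42;  // inside the accessible part: fine
  std::printf("heap byte 0 = %d\n", static_cast<uint8_t*>(region)[0]);
  // Touching region[heap_size] here would raise SIGSEGV, which is exactly the
  // fault the Wasm trap handler relies on.

  munmap(region, reservation);
  return 0;
}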


@@ -15,7 +15,6 @@
 #include "src/parsing/preparse-data.h"
 
 #include "src/wasm/signature-map.h"
-#include "src/wasm/wasm-limits.h"
 #include "src/wasm/wasm-opcodes.h"
 
 namespace v8 {
@@ -483,16 +482,6 @@ inline bool EnableGuardRegions() {
   return FLAG_wasm_guard_pages && kGuardRegionsSupported;
 }
 
-inline void* GetMemoryStartFromGuardRegionStart(void* guard_region_start) {
-  DCHECK(guard_region_start != nullptr);
-  return static_cast<byte*>(guard_region_start) + kLowerGuardRegionSize;
-}
-
-inline void* GetGuardRegionStartFromMemoryStart(void* mem_start) {
-  DCHECK(mem_start != nullptr);
-  return static_cast<byte*>(mem_start) - kLowerGuardRegionSize;
-}
-
 void UnpackAndRegisterProtectedInstructions(Isolate* isolate,
                                             Handle<FixedArray> code_table);


@@ -99,17 +99,12 @@ class TestingModule : public ModuleEnv {
   ~TestingModule() {
     if (instance->mem_start) {
       if (EnableGuardRegions() && module_.is_wasm()) {
-#if V8_TARGET_ARCH_64_BIT
         // See the corresponding code in AddMemory. We use a different
         // allocation path when guard regions are enabled, which means we have
         // to free it differently too.
-        const size_t alloc_size = kTotalGuardRegionSize;
-        v8::base::OS::Free(
-            GetGuardRegionStartFromMemoryStart(instance->mem_start),
-            alloc_size);
-#else
-        DCHECK(false && "Guard regions are not supported on this platform");
-#endif
+        const size_t alloc_size =
+            RoundUp(kWasmMaxHeapOffset, v8::base::OS::CommitPageSize());
+        v8::base::OS::Free(instance->mem_start, alloc_size);
       } else {
         free(instance->mem_start);
       }