[API] Don't use VM methods of v8::ArrayBuffer::Allocator.

- Replaces calls to Allocator Reserve, Free, and SetPermissions
  with equivalent page allocator calls (allocation.h).
- Un-implements these methods to catch any remaining usage, in
  preparation for removing them.

Bug: chromium:799573
Cq-Include-Trybots: master.tryserver.chromium.linux:linux_chromium_rel_ng
Change-Id: Id233b7a9cfc8e332c64e514f6359e8b670c2d75e
Reviewed-on: https://chromium-review.googlesource.com/911883
Commit-Queue: Bill Budge <bbudge@chromium.org>
Reviewed-by: Eric Holk <eholk@chromium.org>
Reviewed-by: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51340}
This commit is contained in:
Bill Budge 2018-02-16 12:15:00 -08:00 committed by Commit Bot
parent fd9251dbec
commit 95e39b0623
7 changed files with 35 additions and 91 deletions

View File

@ -461,16 +461,7 @@ void* v8::ArrayBuffer::Allocator::Reserve(size_t length) { UNIMPLEMENTED(); }
void v8::ArrayBuffer::Allocator::Free(void* data, size_t length,
AllocationMode mode) {
switch (mode) {
case AllocationMode::kNormal: {
Free(data, length);
return;
}
case AllocationMode::kReservation: {
UNIMPLEMENTED();
return;
}
}
UNIMPLEMENTED();
}
void v8::ArrayBuffer::Allocator::SetProtection(
@ -483,7 +474,7 @@ namespace {
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
virtual void* Allocate(size_t length) {
void* Allocate(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@ -494,7 +485,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
virtual void* AllocateUninitialized(size_t length) {
void* AllocateUninitialized(size_t length) override {
#if V8_OS_AIX && _LINUX_SOURCE_COMPAT
// Work around for GCC bug on AIX
// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=79839
@ -505,42 +496,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return data;
}
virtual void Free(void* data, size_t) { free(data); }
virtual void* Reserve(size_t length) {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
void* address = i::AllocatePages(i::GetRandomMmapAddr(), allocated,
page_size, PageAllocator::kNoAccess);
return address;
}
virtual void Free(void* data, size_t length,
v8::ArrayBuffer::Allocator::AllocationMode mode) {
switch (mode) {
case v8::ArrayBuffer::Allocator::AllocationMode::kNormal: {
return Free(data, length);
}
case v8::ArrayBuffer::Allocator::AllocationMode::kReservation: {
size_t page_size = i::AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(data, allocated));
return;
}
}
}
virtual void SetProtection(
void* data, size_t length,
v8::ArrayBuffer::Allocator::Protection protection) {
DCHECK(protection == v8::ArrayBuffer::Allocator::Protection::kNoAccess ||
protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite);
PageAllocator::Permission permission =
(protection == v8::ArrayBuffer::Allocator::Protection::kReadWrite)
? PageAllocator::kReadWrite
: PageAllocator::kNoAccess;
CHECK(i::SetPermissions(data, length, permission));
}
void Free(void* data, size_t) override { free(data); }
};
bool RunExtraCode(Isolate* isolate, Local<Context> context,

View File

@ -84,15 +84,18 @@ class ArrayBufferAllocatorBase : public v8::ArrayBuffer::Allocator {
allocator_->Free(data, length);
}
void* Reserve(size_t length) override { return allocator_->Reserve(length); }
void* Reserve(size_t length) override {
UNIMPLEMENTED();
return nullptr;
}
void Free(void* data, size_t length, AllocationMode mode) override {
allocator_->Free(data, length, mode);
UNIMPLEMENTED();
}
void SetProtection(void* data, size_t length,
Protection protection) override {
allocator_->SetProtection(data, length, protection);
UNIMPLEMENTED();
}
private:
@ -121,18 +124,6 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
}
void* Reserve(size_t length) override {
// |length| must be over the threshold so we can distinguish VM from
// malloced memory.
DCHECK_LE(kVMThreshold, length);
return ArrayBufferAllocatorBase::Reserve(length);
}
void Free(void* data, size_t length, AllocationMode) override {
// Ignore allocation mode; the appropriate action is determined by |length|.
Free(data, length);
}
private:
static constexpr size_t kVMThreshold = 65536;
static constexpr size_t kTwoGB = 2u * 1024u * 1024u * 1024u;
@ -172,14 +163,6 @@ class MockArrayBufferAllocator : public ArrayBufferAllocatorBase {
return ArrayBufferAllocatorBase::Free(data, Adjust(length));
}
void* Reserve(size_t length) override {
return ArrayBufferAllocatorBase::Reserve(Adjust(length));
}
void Free(void* data, size_t length, AllocationMode mode) override {
return ArrayBufferAllocatorBase::Free(data, Adjust(length), mode);
}
private:
size_t Adjust(size_t length) {
const size_t kAllocationLimit = 10 * kMB;
@ -2574,7 +2557,11 @@ void SourceGroup::JoinThread() {
ExternalizedContents::~ExternalizedContents() {
if (base_ != nullptr) {
Shell::array_buffer_allocator->Free(base_, length_, mode_);
if (mode_ == ArrayBuffer::Allocator::AllocationMode::kReservation) {
CHECK(i::FreePages(base_, length_));
} else {
Shell::array_buffer_allocator->Free(base_, length_);
}
}
}

View File

@ -19211,10 +19211,11 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
// actually a buffer we are tracking.
isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
allocation.length);
CHECK(FreePages(allocation.allocation_base, allocation.length));
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
}
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length, allocation.mode);
}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,

View File

@ -54,8 +54,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
// We always allocate the largest possible offset into the heap, so the
// addressable memory after the guard page can be made inaccessible.
*allocation_length = RoundUp(kWasmMaxHeapOffset, CommitPageSize());
DCHECK_EQ(0, size % CommitPageSize());
size_t page_size = AllocatePageSize();
*allocation_length = RoundUp(kWasmMaxHeapOffset, page_size);
DCHECK_EQ(0, size % page_size);
WasmAllocationTracker* const allocation_tracker =
isolate->wasm_engine()->allocation_tracker();
@ -67,9 +68,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
return nullptr;
}
// The Reserve makes the whole region inaccessible by default.
*allocation_base =
isolate->array_buffer_allocator()->Reserve(*allocation_length);
// Make the whole region inaccessible by default.
*allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
PageAllocator::kNoAccess);
if (*allocation_base == nullptr) {
allocation_tracker->ReleaseAddressSpace(*allocation_length);
return nullptr;
@ -78,8 +79,7 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
void* memory = *allocation_base;
// Make the part we care about accessible.
isolate->array_buffer_allocator()->SetProtection(
memory, size, v8::ArrayBuffer::Allocator::Protection::kReadWrite);
CHECK(SetPermissions(memory, size, PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);

View File

@ -404,9 +404,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
if ((enable_guard_regions || old_size == new_size) && old_size != 0) {
DCHECK_NOT_NULL(old_buffer->backing_store());
if (old_size != new_size) {
isolate->array_buffer_allocator()->SetProtection(
old_mem_start, new_size,
v8::ArrayBuffer::Allocator::Protection::kReadWrite);
CHECK(i::SetPermissions(old_mem_start, new_size,
PageAllocator::kReadWrite));
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(pages * wasm::kWasmPageSize);
}

View File

@ -1017,8 +1017,13 @@ struct ManuallyExternalizedBuffer {
}
~ManuallyExternalizedBuffer() {
if (!buffer_->has_guard_region()) {
isolate_->array_buffer_allocator()->Free(
allocation_base_, allocation_length_, buffer_->allocation_mode());
if (buffer_->allocation_mode() ==
ArrayBuffer::Allocator::AllocationMode::kReservation) {
CHECK(v8::internal::FreePages(allocation_base_, allocation_length_));
} else {
isolate_->array_buffer_allocator()->Free(allocation_base_,
allocation_length_);
}
}
}
};

View File

@ -1874,11 +1874,7 @@ class OOMArrayBufferAllocator : public ArrayBuffer::Allocator {
public:
void* Allocate(size_t) override { return nullptr; }
void* AllocateUninitialized(size_t) override { return nullptr; }
void* Reserve(size_t length) override { return nullptr; }
void Free(void* data, size_t length, AllocationMode mode) override {}
void Free(void*, size_t) override {}
void SetProtection(void* data, size_t length,
Protection protection) override {}
};
TEST_F(ValueSerializerTest, DecodeArrayBufferOOM) {