[wasm] Track Wasm allocations in WasmMemoryTracker

This moves the Wasm-specific metadata from fields on the ArrayBuffer
into a table managed by the WasmMemoryTracker.

Bug: chromium:776273
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Id8b050bfdfe0fbe9436fb055e92c08d503d3c2ba
Reviewed-on: https://chromium-review.googlesource.com/850550
Commit-Queue: Eric Holk <eholk@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#52080}
Author: Eric Holk <eholk@chromium.org>, 2018-03-20 10:23:01 -07:00 (committed by Commit Bot)
Parent: 518b5c1c96
Commit: f866af42ae
16 changed files with 297 additions and 197 deletions
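
For orientation before the diffs: the pattern this change adopts is a side
table, owned by the Wasm engine, that maps a buffer's start address to its
allocation metadata, instead of storing that metadata as fields on every
ArrayBuffer. The sketch below is a simplified stand-in for illustration only;
the real WasmMemoryTracker, with its reservation accounting and DCHECKs,
appears in the wasm-memory header diff near the end, and the field names here
mirror the AllocationData struct introduced there.

// Simplified sketch of the side-table pattern; not V8 code.
#include <cstddef>
#include <mutex>
#include <unordered_map>

struct AllocationData {
  void* allocation_base;     // start of the OS-level allocation
  size_t allocation_length;  // size of the OS-level allocation
  void* buffer_start;        // start of the usable buffer
  size_t buffer_length;      // usable size, <= allocation_length
};

class MemoryTrackerSketch {
 public:
  void RegisterAllocation(const AllocationData& data) {
    std::lock_guard<std::mutex> lock(mutex_);
    allocations_.emplace(data.buffer_start, data);
  }
  // Returns nullptr if the buffer is not tracked, i.e. not Wasm memory.
  const AllocationData* FindAllocationData(const void* buffer_start) {
    std::lock_guard<std::mutex> lock(mutex_);
    auto it = allocations_.find(buffer_start);
    return it == allocations_.end() ? nullptr : &it->second;
  }

 private:
  std::mutex mutex_;  // Wasm memories can be freed from other threads.
  std::unordered_map<const void*, AllocationData> allocations_;
};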

@@ -7684,17 +7684,23 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
   Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
                   "ArrayBuffer already externalized");
   self->set_is_external(true);
-  if (self->has_guard_region()) {
+  // We need to capture the contents before releasing the allocation from the
+  // Wasm tracker, because otherwise we will not correctly capture the
+  // allocation data.
+  const v8::ArrayBuffer::Contents contents = GetContents();
+  if (self->is_wasm_memory()) {
     // Since this is being externalized, the Wasm Allocation Tracker can no
     // longer track it.
     //
     // TODO(eholk): Find a way to track this across externalization
-    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
-        self->allocation_length());
+    isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
+        self->backing_store());
   }
   isolate->heap()->UnregisterArrayBuffer(*self);
-  return GetContents();
+  // A regular copy is good enough. No move semantics needed.
+  return contents;
 }
@@ -7704,7 +7710,7 @@ v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
   Contents contents;
   contents.allocation_base_ = self->allocation_base();
   contents.allocation_length_ = self->allocation_length();
-  contents.allocation_mode_ = self->has_guard_region()
+  contents.allocation_mode_ = self->is_wasm_memory()
                                   ? Allocator::AllocationMode::kReservation
                                   : Allocator::AllocationMode::kNormal;
   contents.data_ = self->backing_store();
@@ -7907,16 +7913,23 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
   Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
                   "SharedArrayBuffer already externalized");
   self->set_is_external(true);
-  if (self->has_guard_region()) {
+  // We need to capture the contents before releasing the allocation from the
+  // Wasm tracker, because otherwise we will not correctly capture the
+  // allocation data.
+  const v8::SharedArrayBuffer::Contents contents = GetContents();
+  if (self->is_wasm_memory()) {
     // Since this is being externalized, the Wasm Allocation Tracker can no
     // longer track it.
     //
     // TODO(eholk): Find a way to track this across externalization
-    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
-        self->allocation_length());
+    isolate->wasm_engine()->memory_tracker()->ReleaseAllocation(
+        self->backing_store());
   }
   isolate->heap()->UnregisterArrayBuffer(*self);
-  return GetContents();
+  // A regular copy is good enough. No move semantics needed.
+  return contents;
 }
@@ -7927,7 +7940,7 @@ v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
   contents.allocation_base_ = self->allocation_base();
   contents.allocation_length_ = self->allocation_length();
   contents.allocation_mode_ =
-      self->has_guard_region()
+      self->is_wasm_memory()
           ? ArrayBufferAllocator::Allocator::AllocationMode::kReservation
           : ArrayBufferAllocator::Allocator::AllocationMode::kNormal;
   contents.data_ = self->backing_store();

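The ordering in both Externalize() hunks is the subtle part: GetContents()
resolves allocation_base and allocation_length through the tracker (see the
js-array inline-header diff below), so the Contents snapshot must be taken
before ReleaseAllocation() drops the tracker entry. A self-contained sketch of
that constraint; every type and function here is a hypothetical stand-in:

// Sketch only; not V8's API.
#include <cassert>
#include <cstddef>
#include <unordered_map>

struct AllocationData { void* base; size_t length; };
struct Contents { void* base; size_t length; };

std::unordered_map<const void*, AllocationData> tracker;

Contents GetContents(const void* buffer_start) {
  auto it = tracker.find(buffer_start);
  assert(it != tracker.end());  // Would fail after ReleaseAllocation.
  return {it->second.base, it->second.length};
}

Contents Externalize(const void* buffer_start) {
  Contents contents = GetContents(buffer_start);  // Snapshot first...
  tracker.erase(buffer_start);                    // ...then release.
  return contents;
}
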
@@ -55,9 +55,9 @@ void LocalArrayBufferTracker::Process(Callback callback) {
       void* allocation_base = old_buffer->allocation_base();
       DCHECK_NOT_NULL(allocation_base);
-      backing_stores_to_free->emplace_back(allocation_base,
-                                           old_buffer->allocation_length(),
-                                           old_buffer->allocation_mode());
+      backing_stores_to_free->emplace_back(
+          allocation_base, old_buffer->allocation_length(),
+          old_buffer->backing_store(), old_buffer->allocation_mode());
       it = array_buffers_.erase(it);
     } else {
       UNREACHABLE();

@@ -148,11 +148,11 @@ class JSFunction::BodyDescriptor final : public BodyDescriptorBase {
 class JSArrayBuffer::BodyDescriptor final : public BodyDescriptorBase {
  public:
   STATIC_ASSERT(kByteLengthOffset + kPointerSize == kBackingStoreOffset);
-  STATIC_ASSERT(kAllocationLengthOffset + kPointerSize == kBitFieldSlot);
+  STATIC_ASSERT(kBackingStoreOffset + kPointerSize == kBitFieldSlot);
   STATIC_ASSERT(kBitFieldSlot + kPointerSize == kSize);

   static bool IsValidSlot(HeapObject* obj, int offset) {
-    if (offset < kAllocationLengthOffset) return true;
+    if (offset < kBitFieldSlot) return true;
     if (offset < kSize) return false;
     return IsValidSlotImpl(obj, offset);
   }

@@ -1038,7 +1038,7 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) {  // NOLINT
   if (is_neuterable()) os << "\n - neuterable";
   if (was_neutered()) os << "\n - neutered";
   if (is_shared()) os << "\n - shared";
-  if (has_guard_region()) os << "\n - has_guard_region";
+  if (is_wasm_memory()) os << "\n - is_wasm_memory";
   if (is_growable()) os << "\n - growable";
   JSObjectPrintBody(os, this, !was_neutered());
 }

@@ -19021,8 +19021,6 @@ void JSArrayBuffer::Neuter() {
   CHECK(is_external());
   set_backing_store(nullptr);
   set_byte_length(Smi::kZero);
-  set_allocation_base(nullptr);
-  set_allocation_length(0);
   set_was_neutered(true);
   set_is_neuterable(false);
   // Invalidate the neutering protector.
@@ -19036,28 +19034,21 @@ void JSArrayBuffer::FreeBackingStore() {
   if (allocation_base() == nullptr) {
     return;
   }
-  using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
-  const size_t length = allocation_length();
-  const AllocationMode mode = allocation_mode();
-  FreeBackingStore(GetIsolate(), {allocation_base(), length, mode});
+  FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
+                                  backing_store(), allocation_mode()});

   // Zero out the backing store and allocation base to avoid dangling
   // pointers.
   set_backing_store(nullptr);
   // TODO(eholk): set_byte_length(0) once we aren't using Smis for the
   // byte_length. We can't do it now because the GC needs to call
   // FreeBackingStore while it is collecting.
-  set_allocation_base(nullptr);
-  set_allocation_length(0);
 }

 // static
 void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
   if (allocation.mode == ArrayBuffer::Allocator::AllocationMode::kReservation) {
-    // TODO(eholk): check with WasmAllocationTracker to make sure this is
-    // actually a buffer we are tracking.
-    isolate->wasm_engine()->allocation_tracker()->ReleaseAddressSpace(
-        allocation.length);
+    wasm::WasmMemoryTracker* memory_tracker =
+        isolate->wasm_engine()->memory_tracker();
+    if (memory_tracker->IsWasmMemory(allocation.backing_store)) {
+      memory_tracker->ReleaseAllocation(allocation.backing_store);
+    }
     CHECK(FreePages(allocation.allocation_base, allocation.length));
   } else {
     isolate->array_buffer_allocator()->Free(allocation.allocation_base,
@@ -19065,17 +19056,13 @@ void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
   }
 }

-void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
-                          bool is_external, void* data, size_t allocated_length,
-                          SharedFlag shared) {
-  return Setup(array_buffer, isolate, is_external, data, allocated_length, data,
-               allocated_length, shared);
+void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
+  set_bit_field(IsWasmMemory::update(bit_field(), is_wasm_memory));
 }

 void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
-                          bool is_external, void* allocation_base,
-                          size_t allocation_length, void* data,
-                          size_t byte_length, SharedFlag shared) {
+                          bool is_external, void* data, size_t byte_length,
+                          SharedFlag shared, bool is_wasm_memory) {
   DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
             v8::ArrayBuffer::kEmbedderFieldCount);
   for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
@@ -19085,6 +19072,7 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
   array_buffer->set_is_external(is_external);
   array_buffer->set_is_neuterable(shared == SharedFlag::kNotShared);
   array_buffer->set_is_shared(shared == SharedFlag::kShared);
+  array_buffer->set_is_wasm_memory(is_wasm_memory);

   Handle<Object> heap_byte_length =
       isolate->factory()->NewNumberFromSize(byte_length);
@@ -19096,9 +19084,6 @@ void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
   // already been promoted.
   array_buffer->set_backing_store(data);

-  array_buffer->set_allocation_base(allocation_base);
-  array_buffer->set_allocation_length(allocation_length);
-
   if (data && !is_external) {
     isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
   }
@@ -19160,9 +19145,8 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
   Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
                                isolate);

-  // This code does not know how to materialize from a buffer with guard
-  // regions.
-  DCHECK(!buffer->has_guard_region());
+  // This code does not know how to materialize from wasm buffers.
+  DCHECK(!buffer->is_wasm_memory());

   void* backing_store =
       isolate->array_buffer_allocator()->AllocateUninitialized(
@@ -19176,8 +19160,6 @@ Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
   // registration method below handles the case of registering a buffer that has
   // already been promoted.
   buffer->set_backing_store(backing_store);
-  buffer->set_allocation_base(backing_store);
-  buffer->set_allocation_length(NumberToSize(buffer->byte_length()));
   // RegisterNewArrayBuffer expects a valid length for adjusting counters.
   isolate->heap()->RegisterNewArrayBuffer(*buffer);
   memcpy(buffer->backing_store(),
@@ -19199,7 +19181,7 @@ Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
                                  GetIsolate());
   if (array_buffer->was_neutered() ||
       array_buffer->backing_store() != nullptr ||
-      array_buffer->has_guard_region()) {
+      array_buffer->is_wasm_memory()) {
     return array_buffer;
   }
   Handle<JSTypedArray> self(this);

@@ -6,6 +6,7 @@
 #define V8_OBJECTS_JS_ARRAY_INL_H_

 #include "src/objects/js-array.h"
+#include "src/wasm/wasm-engine.h"

 // Has to be the last include (doesn't have include guards):
 #include "src/objects/object-macros.h"
@@ -76,30 +77,48 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
 ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)

-void* JSArrayBuffer::allocation_base() const {
-  intptr_t ptr = READ_INTPTR_FIELD(this, kAllocationBaseOffset);
-  return reinterpret_cast<void*>(ptr);
-}
-
-void JSArrayBuffer::set_allocation_base(void* value, WriteBarrierMode mode) {
-  intptr_t ptr = reinterpret_cast<intptr_t>(value);
-  WRITE_INTPTR_FIELD(this, kAllocationBaseOffset, ptr);
-}
-
 size_t JSArrayBuffer::allocation_length() const {
-  return *reinterpret_cast<const size_t*>(
-      FIELD_ADDR_CONST(this, kAllocationLengthOffset));
+  if (backing_store() == nullptr) {
+    return 0;
+  }
+  // If this buffer is managed by the WasmMemoryTracker
+  if (is_wasm_memory()) {
+    const auto* data =
+        GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+            backing_store());
+    DCHECK_NOT_NULL(data);
+    return data->allocation_length;
+  }
+  return byte_length()->Number();
 }

-void JSArrayBuffer::set_allocation_length(size_t value) {
-  (*reinterpret_cast<size_t*>(FIELD_ADDR(this, kAllocationLengthOffset))) =
-      value;
+void* JSArrayBuffer::allocation_base() const {
+  if (backing_store() == nullptr) {
+    return nullptr;
+  }
+  // If this buffer is managed by the WasmMemoryTracker
+  if (is_wasm_memory()) {
+    const auto* data =
+        GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
+            backing_store());
+    DCHECK_NOT_NULL(data);
+    return data->allocation_base;
+  }
+  return backing_store();
 }

 ArrayBuffer::Allocator::AllocationMode JSArrayBuffer::allocation_mode() const {
   using AllocationMode = ArrayBuffer::Allocator::AllocationMode;
-  return has_guard_region() ? AllocationMode::kReservation
-                            : AllocationMode::kNormal;
+  return is_wasm_memory() ? AllocationMode::kReservation
+                          : AllocationMode::kNormal;
 }

+bool JSArrayBuffer::is_wasm_memory() const {
+  bool const is_wasm_memory = IsWasmMemory::decode(bit_field());
+  DCHECK(backing_store() == nullptr ||
+         GetIsolate()->wasm_engine()->memory_tracker()->IsWasmMemory(
+             backing_store()) == is_wasm_memory);
+  return is_wasm_memory;
+}
+
 void JSArrayBuffer::set_bit_field(uint32_t bits) {
@@ -143,14 +162,6 @@ void JSArrayBuffer::set_is_shared(bool value) {
   set_bit_field(IsShared::update(bit_field(), value));
 }

-bool JSArrayBuffer::has_guard_region() const {
-  return HasGuardRegion::decode(bit_field());
-}
-
-void JSArrayBuffer::set_has_guard_region(bool value) {
-  set_bit_field(HasGuardRegion::update(bit_field(), value));
-}
-
 bool JSArrayBuffer::is_growable() { return IsGrowable::decode(bit_field()); }

 void JSArrayBuffer::set_is_growable(bool value) {

@@ -139,14 +139,8 @@ class JSArrayBuffer : public JSObject {
   // [backing_store]: backing memory for this array
   DECL_ACCESSORS(backing_store, void)

-  // [allocation_base]: the start of the memory allocation for this array,
-  // normally equal to backing_store
-  DECL_ACCESSORS(allocation_base, void)
-
-  // [allocation_length]: the size of the memory allocation for this array,
-  // normally equal to byte_length
   inline size_t allocation_length() const;
-  inline void set_allocation_length(size_t value);
+  inline void* allocation_base() const;

   inline uint32_t bit_field() const;
   inline void set_bit_field(uint32_t bits);
@@ -166,9 +160,6 @@ class JSArrayBuffer : public JSObject {
   inline bool is_shared();
   inline void set_is_shared(bool value);

-  inline bool has_guard_region() const;
-  inline void set_has_guard_region(bool value);
-
   inline bool is_growable();
   inline void set_is_growable(bool value);
@@ -181,26 +172,29 @@ class JSArrayBuffer : public JSObject {
   struct Allocation {
     using AllocationMode = ArrayBuffer::Allocator::AllocationMode;

-    Allocation(void* allocation_base, size_t length, AllocationMode mode)
-        : allocation_base(allocation_base), length(length), mode(mode) {}
+    Allocation(void* allocation_base, size_t length, void* backing_store,
+               AllocationMode mode)
+        : allocation_base(allocation_base),
+          length(length),
+          backing_store(backing_store),
+          mode(mode) {}

     void* allocation_base;
     size_t length;
+    void* backing_store;
     AllocationMode mode;
   };

+  // Returns whether the buffer is tracked by the WasmMemoryTracker.
+  inline bool is_wasm_memory() const;
+
   void FreeBackingStore();
   static void FreeBackingStore(Isolate* isolate, Allocation allocation);

   V8_EXPORT_PRIVATE static void Setup(
       Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
       void* data, size_t allocated_length,
-      SharedFlag shared = SharedFlag::kNotShared);
-
-  V8_EXPORT_PRIVATE static void Setup(
-      Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
-      void* allocation_base, size_t allocation_length, void* data,
-      size_t byte_length, SharedFlag shared = SharedFlag::kNotShared);
+      SharedFlag shared = SharedFlag::kNotShared, bool is_wasm_memory = false);

   // Returns false if array buffer contents could not be allocated.
   // In this case, |array_buffer| will not be set up.
@@ -217,10 +211,7 @@ class JSArrayBuffer : public JSObject {
   // The rest of the fields are not JSObjects, so they are not iterated over in
   // objects-body-descriptors-inl.h.
   static const int kBackingStoreOffset = kByteLengthOffset + kPointerSize;
-  static const int kAllocationBaseOffset = kBackingStoreOffset + kPointerSize;
-  static const int kAllocationLengthOffset =
-      kAllocationBaseOffset + kPointerSize;
-  static const int kBitFieldSlot = kAllocationLengthOffset + kSizetSize;
+  static const int kBitFieldSlot = kBackingStoreOffset + kPointerSize;
 #if V8_TARGET_LITTLE_ENDIAN || !V8_HOST_ARCH_64_BIT
   static const int kBitFieldOffset = kBitFieldSlot;
 #else
@@ -241,10 +232,12 @@ class JSArrayBuffer : public JSObject {
   class IsNeuterable : public BitField<bool, 2, 1> {};
   class WasNeutered : public BitField<bool, 3, 1> {};
   class IsShared : public BitField<bool, 4, 1> {};
-  class HasGuardRegion : public BitField<bool, 5, 1> {};
-  class IsGrowable : public BitField<bool, 6, 1> {};
+  class IsGrowable : public BitField<bool, 5, 1> {};
+  class IsWasmMemory : public BitField<bool, 6, 1> {};

  private:
+  void set_is_wasm_memory(bool is_wasm_memory);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSArrayBuffer);
 };

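The IsGrowable/IsWasmMemory classes above are instances of V8's BitField
template, which packs each flag into a fixed bit of bit_field(). A simplified
stand-in (not the real template) showing the update/decode semantics the new
is_wasm_memory() accessor relies on:

// Minimal sketch of one-bit BitField behavior.
#include <cassert>
#include <cstdint>

template <int kShift>
struct BoolBitField {
  static constexpr uint32_t kMask = 1u << kShift;
  static bool decode(uint32_t bits) { return (bits & kMask) != 0; }
  static uint32_t update(uint32_t bits, bool value) {
    return value ? (bits | kMask) : (bits & ~kMask);
  }
};

using IsGrowable = BoolBitField<5>;    // bit positions from the diff
using IsWasmMemory = BoolBitField<6>;

int main() {
  uint32_t bit_field = 0;
  bit_field = IsWasmMemory::update(bit_field, true);
  assert(IsWasmMemory::decode(bit_field));
  assert(!IsGrowable::decode(bit_field));  // other bits untouched
}
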
@@ -217,7 +217,7 @@ RUNTIME_FUNCTION(Runtime_WasmExceptionSetElement) {
       CHECK_LT(index, Smi::ToInt(values->length()));
       CONVERT_SMI_ARG_CHECKED(value, 1);
       auto* vals =
-          reinterpret_cast<uint16_t*>(values->GetBuffer()->allocation_base());
+          reinterpret_cast<uint16_t*>(values->GetBuffer()->backing_store());
       vals[index] = static_cast<uint16_t>(value);
     }
   }

@@ -235,7 +235,6 @@ HeapObject* Deserializer<AllocatorT>::PostProcessNewObject(HeapObject* obj,
       void* backing_store = off_heap_backing_stores_[store_index->value()];

       buffer->set_backing_store(backing_store);
-      buffer->set_allocation_base(backing_store);
       isolate_->heap()->RegisterNewArrayBuffer(buffer);
     }
   } else if (obj->IsFixedTypedArrayBase()) {

@@ -1652,10 +1652,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
   // Set up the globals for the new instance.
   //--------------------------------------------------------------------------
   WasmContext* wasm_context = instance->wasm_context()->get();
-  MaybeHandle<JSArrayBuffer> old_globals;
   uint32_t globals_size = module_->globals_size;
   if (globals_size > 0) {
-    const bool enable_guard_regions = false;
+    constexpr bool enable_guard_regions = false;
     Handle<JSArrayBuffer> global_buffer =
         NewArrayBuffer(isolate_, globals_size, enable_guard_regions);
     globals_ = global_buffer;
@@ -1708,8 +1707,9 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
     Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
     memory->set_is_neuterable(false);

-    DCHECK_IMPLIES(use_trap_handler(),
-                   module_->is_asm_js() || memory->has_guard_region());
+    DCHECK_IMPLIES(use_trap_handler(), module_->is_asm_js() ||
+                                           memory->is_wasm_memory() ||
+                                           memory->backing_store() == nullptr);
   } else if (initial_pages > 0) {
     // Allocate memory if the initial size is more than 0 pages.
     memory_ = AllocateMemory(initial_pages);

@@ -71,7 +71,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
   WasmCodeManager* code_manager() const { return code_manager_.get(); }

-  WasmAllocationTracker* allocation_tracker() { return &allocation_tracker_; }
+  WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }

   // We register and unregister CancelableTaskManagers that run
   // isolate-dependent tasks. These tasks need to be shutdown if the isolate is
@@ -84,7 +84,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
  private:
   CompilationManager compilation_manager_;
   std::unique_ptr<WasmCodeManager> code_manager_;
-  WasmAllocationTracker allocation_tracker_;
+  WasmMemoryTracker memory_tracker_;

   // Contains all CancelableTaskManagers that run tasks that are dependent
   // on the isolate.

@@ -12,13 +12,14 @@ namespace v8 {
 namespace internal {
 namespace wasm {

-WasmAllocationTracker::~WasmAllocationTracker() {
+WasmMemoryTracker::~WasmMemoryTracker() {
   // All reserved address space should be released before the allocation tracker
   // is destroyed.
+  DCHECK_EQ(reserved_address_space_, 0u);
   DCHECK_EQ(allocated_address_space_, 0u);
 }

-bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
+bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
   // Address space reservations are currently only meaningful using guard
   // regions, which is currently only supported on 64-bit systems. On other
   // platforms, we always fall back on bounds checks.
@@ -28,24 +29,83 @@ bool WasmAllocationTracker::ReserveAddressSpace(size_t num_bytes) {
   static constexpr size_t kAddressSpaceLimit = 0x80000000;  // 2 GiB
 #endif

-  size_t const old_count = allocated_address_space_.fetch_add(num_bytes);
+  size_t const old_count = reserved_address_space_.fetch_add(num_bytes);
   DCHECK_GE(old_count + num_bytes, old_count);
   if (old_count + num_bytes <= kAddressSpaceLimit) {
     return true;
   }
-  allocated_address_space_ -= num_bytes;
+  reserved_address_space_ -= num_bytes;
   return false;
 }

-void WasmAllocationTracker::ReleaseAddressSpace(size_t num_bytes) {
-  DCHECK_LE(num_bytes, allocated_address_space_);
-  allocated_address_space_ -= num_bytes;
+void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
+  size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
+  USE(old_reserved);
+  DCHECK_LE(num_bytes, old_reserved);
+  DCHECK_GE(old_reserved - num_bytes, allocated_address_space_);
 }
+
+void WasmMemoryTracker::RegisterAllocation(void* allocation_base,
+                                           size_t allocation_length,
+                                           void* buffer_start,
+                                           size_t buffer_length) {
+  // Make sure the caller has reserved the address space before registering the
+  // allocation.
+  DCHECK_LE(allocated_address_space_ + allocation_length,
+            reserved_address_space_);
+
+  base::LockGuard<base::Mutex> scope_lock(&mutex_);
+
+  allocated_address_space_ += allocation_length;
+  allocations_.emplace(buffer_start,
+                       AllocationData{allocation_base, allocation_length,
+                                      buffer_start, buffer_length});
+}
+
+WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation(
+    const void* buffer_start) {
+  base::LockGuard<base::Mutex> scope_lock(&mutex_);
+
+  auto find_result = allocations_.find(buffer_start);
+  CHECK_NE(find_result, allocations_.end());
+
+  if (find_result != allocations_.end()) {
+    size_t num_bytes = find_result->second.allocation_length;
+    DCHECK_LE(num_bytes, reserved_address_space_);
+    DCHECK_LE(num_bytes, allocated_address_space_);
+    reserved_address_space_ -= num_bytes;
+    allocated_address_space_ -= num_bytes;
+
+    AllocationData allocation_data = find_result->second;
+    allocations_.erase(find_result);
+    return allocation_data;
+  }
+  UNREACHABLE();
+}
+
+bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
+  base::LockGuard<base::Mutex> scope_lock(&mutex_);
+  return allocations_.find(buffer_start) != allocations_.end();
+}
+
+const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
+    const void* buffer_start) {
+  base::LockGuard<base::Mutex> scope_lock(&mutex_);
+  const auto& result = allocations_.find(buffer_start);
+  if (result != allocations_.end()) {
+    return &result->second;
+  }
+  return nullptr;
+}

 void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                               bool require_guard_regions,
                               void** allocation_base,
                               size_t* allocation_length) {
 #if V8_TARGET_ARCH_32_BIT
   DCHECK(!require_guard_regions);
 #endif
   // We always allocate the largest possible offset into the heap, so the
   // addressable memory after the guard page can be made inaccessible.
   *allocation_length = require_guard_regions
@@ -53,13 +113,14 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
                              : base::bits::RoundUpToPowerOfTwo32(RoundUp(
                                    static_cast<uint32_t>(size), kWasmPageSize));
   DCHECK_GE(*allocation_length, size);
+  DCHECK_GE(*allocation_length, kWasmPageSize);

-  WasmAllocationTracker* const allocation_tracker =
-      isolate->wasm_engine()->allocation_tracker();
+  WasmMemoryTracker* const memory_tracker =
+      isolate->wasm_engine()->memory_tracker();

-  // Let the WasmAllocationTracker know we are going to reserve a bunch of
+  // Let the WasmMemoryTracker know we are going to reserve a bunch of
   // address space.
-  if (!allocation_tracker->ReserveAddressSpace(*allocation_length)) {
+  if (!memory_tracker->ReserveAddressSpace(*allocation_length)) {
     // If we are over the address space limit, fail.
     return nullptr;
   }
@@ -68,10 +129,9 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
   *allocation_base = AllocatePages(nullptr, *allocation_length, kWasmPageSize,
                                    PageAllocator::kNoAccess);
   if (*allocation_base == nullptr) {
-    allocation_tracker->ReleaseAddressSpace(*allocation_length);
+    memory_tracker->ReleaseReservation(*allocation_length);
     return nullptr;
   }
-
   void* memory = *allocation_base;

   // Make the part we care about accessible.
@@ -81,25 +141,23 @@ void* TryAllocateBackingStore(Isolate* isolate, size_t size,
   reinterpret_cast<v8::Isolate*>(isolate)
       ->AdjustAmountOfExternalAllocatedMemory(size);

+  memory_tracker->RegisterAllocation(*allocation_base, *allocation_length,
+                                     memory, size);
+
   return memory;
 }

-Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* allocation_base,
-                                       size_t allocation_length,
-                                       void* backing_store, size_t size,
-                                       bool is_external,
-                                       bool enable_guard_regions,
+Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
+                                       size_t size, bool is_external,
                                        SharedFlag shared) {
   Handle<JSArrayBuffer> buffer =
       isolate->factory()->NewJSArrayBuffer(shared, TENURED);
   DCHECK_GE(kMaxInt, size);
   if (shared == SharedFlag::kShared) DCHECK(FLAG_experimental_wasm_threads);
-  JSArrayBuffer::Setup(buffer, isolate, is_external, allocation_base,
-                       allocation_length, backing_store, static_cast<int>(size),
-                       shared);
+  constexpr bool is_wasm_memory = true;
+  JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store,
+                       static_cast<int>(size), shared, is_wasm_memory);
   buffer->set_is_neuterable(false);
   buffer->set_is_growable(true);
-  buffer->set_has_guard_region(enable_guard_regions);
   return buffer;
 }
@@ -135,10 +193,7 @@ Handle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size,
 #endif

   constexpr bool is_external = false;
-  // All buffers have guard regions now, but sometimes they are small.
-  constexpr bool has_guard_region = true;
-  return SetupArrayBuffer(isolate, allocation_base, allocation_length, memory,
-                          size, is_external, has_guard_region, shared);
+  return SetupArrayBuffer(isolate, memory, size, is_external, shared);
 }

 void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,

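TryAllocateBackingStore() above implements guard regions in two steps: reserve
the full mapping with no access rights, then make only the usable prefix
accessible, so out-of-bounds accesses past the buffer trap. Below is a sketch
of the same idea using plain POSIX calls; this is an assumption about the
underlying mechanism, not V8's page-allocator wrappers:

// POSIX sketch of allocate-inaccessible-then-commit.
#include <sys/mman.h>
#include <cstddef>

void* AllocateWithGuardRegions(size_t reservation_size, size_t usable_size,
                               void** allocation_base) {
  // Step 1: reserve address space with no access permissions.
  void* base = mmap(nullptr, reservation_size, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return nullptr;
  // Step 2: commit only the part the module may actually use.
  if (mprotect(base, usable_size, PROT_READ | PROT_WRITE) != 0) {
    munmap(base, reservation_size);
    return nullptr;
  }
  *allocation_base = base;
  return base;  // buffer_start == allocation_base in this sketch
}
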
@@ -5,6 +5,9 @@
 #ifndef V8_WASM_WASM_MEMORY_H_
 #define V8_WASM_WASM_MEMORY_H_

+#include <unordered_map>
+
+#include "src/base/platform/mutex.h"
 #include "src/flags.h"
 #include "src/handles.h"
 #include "src/objects/js-array.h"
@@ -13,10 +16,10 @@ namespace v8 {
 namespace internal {
 namespace wasm {

-class WasmAllocationTracker {
+class WasmMemoryTracker {
  public:
-  WasmAllocationTracker() {}
-  ~WasmAllocationTracker();
+  WasmMemoryTracker() {}
+  ~WasmMemoryTracker();

   // ReserveAddressSpace attempts to increase the reserved address space counter
   // to determine whether there is enough headroom to allocate another guarded
@@ -24,13 +27,69 @@ class WasmAllocationTracker {
   // allocate the buffer), false otherwise.
   bool ReserveAddressSpace(size_t num_bytes);

-  // Reduces the address space counter so that the space can be reused.
-  void ReleaseAddressSpace(size_t num_bytes);
+  void RegisterAllocation(void* allocation_base, size_t allocation_length,
+                          void* buffer_start, size_t buffer_length);
+
+  struct AllocationData {
+    void* const allocation_base = nullptr;
+    size_t const allocation_length = 0;
+    void* const buffer_start = nullptr;
+    size_t const buffer_length = 0;
+
+   private:
+    AllocationData(void* allocation_base, size_t allocation_length,
+                   void* buffer_start, size_t buffer_length)
+        : allocation_base(allocation_base),
+          allocation_length(allocation_length),
+          buffer_start(buffer_start),
+          buffer_length(buffer_length) {
+      DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
+                reinterpret_cast<uintptr_t>(buffer_start));
+      DCHECK_GE(
+          reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
+          reinterpret_cast<uintptr_t>(buffer_start));
+      DCHECK_GE(
+          reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
+          reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
+    }
+
+    friend WasmMemoryTracker;
+  };
+
+  // Decreases the amount of reserved address space
+  void ReleaseReservation(size_t num_bytes);
+
+  // Removes an allocation from the tracker
+  AllocationData ReleaseAllocation(const void* buffer_start);
+
+  bool IsWasmMemory(const void* buffer_start);
+
+  // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
+  // buffer is not tracked.
+  const AllocationData* FindAllocationData(const void* buffer_start);

  private:
-  std::atomic_size_t allocated_address_space_{0};
+  // Clients use a two-part process. First they "reserve" the address space,
+  // which signifies an intent to actually allocate it. This determines whether
+  // doing the allocation would put us over our limit. Once there is a
+  // reservation, clients can do the allocation and register the result.
+  //
+  // We should always have:
+  // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
+  std::atomic_size_t reserved_address_space_{0};

-  DISALLOW_COPY_AND_ASSIGN(WasmAllocationTracker);
+  // Used to protect access to the allocated address space counter and
+  // allocation map. This is needed because Wasm memories can be freed on
+  // another thread by the ArrayBufferTracker.
+  base::Mutex mutex_;
+  size_t allocated_address_space_{0};
+
+  // Track Wasm memory allocation information. This is keyed by the start of the
+  // buffer, rather than by the start of the allocation.
+  std::unordered_map<const void*, AllocationData> allocations_;
+
+  DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
 };

 Handle<JSArrayBuffer> NewArrayBuffer(
@@ -38,9 +97,8 @@ Handle<JSArrayBuffer> NewArrayBuffer(
     SharedFlag shared = SharedFlag::kNotShared);

 Handle<JSArrayBuffer> SetupArrayBuffer(
-    Isolate*, void* allocation_base, size_t allocation_length,
-    void* backing_store, size_t size, bool is_external,
-    bool enable_guard_regions, SharedFlag shared = SharedFlag::kNotShared);
+    Isolate*, void* backing_store, size_t size, bool is_external,
+    SharedFlag shared = SharedFlag::kNotShared);

 void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
                         bool free_memory);

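The two-part reserve/allocate protocol described in the comment above implies
a particular call sequence for clients. A sketch of that sequence follows;
Tracker is any type exposing the interface declared in this header, and
OsReserve is a hypothetical stand-in for the actual page allocation:

// Sketch of the intended client protocol; not code from this CL.
#include <cstddef>

void* OsReserve(size_t length);  // hypothetical OS allocation

template <typename Tracker>
void* AllocateWasmMemory(Tracker* tracker, size_t length) {
  if (!tracker->ReserveAddressSpace(length)) {
    return nullptr;  // would exceed the address space limit
  }
  void* base = OsReserve(length);
  if (base == nullptr) {
    tracker->ReleaseReservation(length);  // undo only the reservation
    return nullptr;
  }
  // The reservation is now backed by a real allocation; from here on the
  // memory is freed via ReleaseAllocation(base), which drops both the
  // counters and the table entry.
  tracker->RegisterAllocation(base, length, base, length);
  return base;
}
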
@@ -340,8 +340,7 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
   }

   Handle<JSArrayBuffer> buffer = isolate->factory()->NewJSArrayBuffer();
   constexpr bool is_external = false;
-  JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size, memory,
-                       size);
+  JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
   DisallowHeapAllocation no_gc;  // for raw access to string bytes.
   Handle<SeqOneByteString> module_bytes(shared->module_bytes(), isolate);
   const byte* start =

@@ -369,7 +369,8 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
       new_size > kMaxInt) {
     return Handle<JSArrayBuffer>::null();
   }
-  if (((use_trap_handler && new_size < old_buffer->allocation_length()) ||
+  if (((use_trap_handler && !old_buffer->is_external() &&
+        new_size < old_buffer->allocation_length()) ||
        old_size == new_size) &&
       old_size != 0) {
     DCHECK_NOT_NULL(old_buffer->backing_store());
@@ -385,35 +386,21 @@ Handle<JSArrayBuffer> GrowMemoryBuffer(Isolate* isolate,
     }
     // NOTE: We must allocate a new array buffer here because the spec
     // assumes that ArrayBuffers do not change size.
-    void* allocation_base = old_buffer->allocation_base();
-    size_t allocation_length = old_buffer->allocation_length();
     void* backing_store = old_buffer->backing_store();
-    bool has_guard_region = old_buffer->has_guard_region();
     bool is_external = old_buffer->is_external();
     // Disconnect buffer early so GC won't free it.
     i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
-    Handle<JSArrayBuffer> new_buffer = wasm::SetupArrayBuffer(
-        isolate, allocation_base, allocation_length, backing_store, new_size,
-        is_external, has_guard_region);
+    Handle<JSArrayBuffer> new_buffer =
+        wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
     return new_buffer;
   } else {
-    bool free_memory = false;
     Handle<JSArrayBuffer> new_buffer;
-    if (pages != 0) {
-      // Allocate a new buffer and memcpy the old contents.
-      free_memory = true;
-      new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
-      if (new_buffer.is_null() || old_size == 0) return new_buffer;
-      Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
-      memcpy(new_mem_start, old_mem_start, old_size);
-      DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
-    } else {
-      // Reuse the prior backing store, but allocate a new array buffer.
-      new_buffer = wasm::SetupArrayBuffer(
-          isolate, old_buffer->allocation_base(),
-          old_buffer->allocation_length(), old_buffer->backing_store(),
-          new_size, old_buffer->is_external(), old_buffer->has_guard_region());
-    }
+    new_buffer = wasm::NewArrayBuffer(isolate, new_size, use_trap_handler);
+    if (new_buffer.is_null() || old_size == 0) return new_buffer;
+    Address new_mem_start = static_cast<Address>(new_buffer->backing_store());
+    memcpy(new_mem_start, old_mem_start, old_size);
+    DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
+    constexpr bool free_memory = true;
     i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
     return new_buffer;
   }
@@ -453,11 +440,7 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
   Handle<JSArrayBuffer> buffer;
   if (maybe_buffer.is_null()) {
     // If no buffer was provided, create a 0-length one.
-
-    // TODO(kschimpf): Modify to use argument defining style of
-    // memory. (see above).
-    buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, nullptr, 0, false,
-                                    trap_handler::IsTrapHandlerEnabled());
+    buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
   } else {
     buffer = maybe_buffer.ToHandleChecked();
     // Paranoid check that the buffer size makes sense.

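The NOTE in the GrowMemoryBuffer hunk above is the key constraint: the spec
assumes an ArrayBuffer never changes size, so growing allocates a fresh buffer,
copies the old contents, and detaches the old buffer. A stripped-down sketch of
that grow-by-copy shape, with plain allocator stand-ins and no tracker or guard
regions:

// Grow-by-copy sketch; not V8's implementation.
#include <cstddef>
#include <cstdlib>
#include <cstring>

void* GrowByCopy(void* old_mem, size_t old_size, size_t new_size) {
  void* new_mem = std::calloc(new_size, 1);  // fresh zero-initialized buffer
  if (new_mem == nullptr) return nullptr;
  std::memcpy(new_mem, old_mem, old_size);   // preserve old contents
  std::free(old_mem);                        // "detach" the old buffer
  return new_mem;
}
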
@@ -1005,25 +1005,23 @@ struct ManuallyExternalizedBuffer {
   Handle<JSArrayBuffer> buffer_;
   void* allocation_base_;
   size_t allocation_length_;
+  bool const should_free_;

   ManuallyExternalizedBuffer(JSArrayBuffer* buffer, Isolate* isolate)
       : isolate_(isolate),
         buffer_(buffer, isolate),
         allocation_base_(buffer->allocation_base()),
-        allocation_length_(buffer->allocation_length()) {
-    if (!buffer->has_guard_region()) {
+        allocation_length_(buffer->allocation_length()),
+        should_free_(!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
+            buffer->backing_store())) {
+    if (!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
+            buffer->backing_store())) {
       v8::Utils::ToLocal(buffer_)->Externalize();
     }
   }
   ~ManuallyExternalizedBuffer() {
-    if (!buffer_->has_guard_region()) {
-      if (buffer_->allocation_mode() ==
-          ArrayBuffer::Allocator::AllocationMode::kReservation) {
-        CHECK(v8::internal::FreePages(allocation_base_, allocation_length_));
-      } else {
-        isolate_->array_buffer_allocator()->Free(allocation_base_,
-                                                 allocation_length_);
-      }
+    if (should_free_) {
+      buffer_->FreeBackingStore();
     }
   }
 };
@@ -1081,18 +1079,22 @@ TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
   {
     Isolate* isolate = CcTest::InitIsolateOnce();
     HandleScope scope(isolate);
-    void* backing_store =
-        isolate->array_buffer_allocator()->Allocate(16 * kWasmPageSize);
-    Handle<JSArrayBuffer> buffer =
-        wasm::SetupArrayBuffer(isolate, backing_store, 16 * kWasmPageSize,
-                               backing_store, 16 * kWasmPageSize, false, false);
+#if V8_TARGET_ARCH_64_BIT
+    constexpr bool require_guard_regions = true;
+#else
+    constexpr bool require_guard_regions = false;
+#endif
+    Handle<JSArrayBuffer> buffer = wasm::NewArrayBuffer(
+        isolate, 16 * kWasmPageSize, require_guard_regions);
     Handle<WasmMemoryObject> mem_obj =
         WasmMemoryObject::New(isolate, buffer, 100);
-    v8::Utils::ToLocal(buffer)->Externalize();
+    auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
     int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
     CHECK_EQ(16, result);
-    isolate->array_buffer_allocator()->Free(backing_store, 16 * kWasmPageSize);
+    const JSArrayBuffer::Allocation allocation{
+        contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
+        contents.AllocationMode()};
+    JSArrayBuffer::FreeBackingStore(isolate, allocation);
   }
   Cleanup();
 }
@@ -1103,14 +1105,19 @@ TEST(Run_WasmModule_Buffer_Externalized_Detach) {
     // https://bugs.chromium.org/p/chromium/issues/detail?id=731046
     Isolate* isolate = CcTest::InitIsolateOnce();
     HandleScope scope(isolate);
-    void* backing_store =
-        isolate->array_buffer_allocator()->Allocate(16 * kWasmPageSize);
-    Handle<JSArrayBuffer> buffer =
-        wasm::SetupArrayBuffer(isolate, backing_store, 16 * kWasmPageSize,
-                               backing_store, 16 * kWasmPageSize, false, false);
-    v8::Utils::ToLocal(buffer)->Externalize();
+#if V8_TARGET_ARCH_64_BIT
+    constexpr bool require_guard_regions = true;
+#else
+    constexpr bool require_guard_regions = false;
+#endif
+    Handle<JSArrayBuffer> buffer = wasm::NewArrayBuffer(
+        isolate, 16 * kWasmPageSize, require_guard_regions);
+    auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
     wasm::DetachMemoryBuffer(isolate, buffer, true);
-    isolate->array_buffer_allocator()->Free(backing_store, 16 * kWasmPageSize);
+    const JSArrayBuffer::Allocation allocation{
+        contents.AllocationBase(), contents.AllocationLength(), contents.Data(),
+        contents.AllocationMode()};
+    JSArrayBuffer::FreeBackingStore(isolate, allocation);
   }
   Cleanup();
 }