From bc33f5aeba9ceb13f8bfc401c5ba2521c2207ffb Mon Sep 17 00:00:00 2001 From: "Ben L. Titzer" Date: Thu, 18 Jul 2019 16:21:18 +0200 Subject: [PATCH] Reland "[arraybuffer] Rearchitect backing store ownership" This is a reland of 31cd5d83d34ece8d1917082236b76bfeb09f038b Original change's description: > [arraybuffer] Rearchitect backing store ownership > > This CL completely rearchitects the ownership of array buffer backing stores, > consolidating ownership into a {BackingStore} C++ object that is tracked > throughout V8 using unique_ptr and shared_ptr where appropriate. > > Overall, lifetime management is simpler and more explicit. The numerous > ways that array buffers were initialized have been streamlined to one > Attach() method on JSArrayBuffer. The array buffer tracker in the > GC implementation now manages std::shared_ptr pointers, > and the construction and destruction of the BackingStore object itself > handles the underlying page or embedder-allocated memory. > > The embedder API remains unchanged for now. We use the > v8::ArrayBuffer::Contents struct to hide an additional shared_ptr to > keep the backing store alive properly, even in the case of aliases > from live heap objects. Thus the embedder has a lower chance of making > a mistake. Long-term, we should move the embedder to a model where they > manage backing stores using shared_ptr to an opaque backing store object. > > R=mlippautz@chromium.org > BUG=v8:9380,v8:9221 > > Change-Id: I48fae5ac85dcf6172a83f252439e77e7c1a16ccd > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1584323 > Commit-Queue: Ben Titzer > Reviewed-by: Ben Titzer > Reviewed-by: Michael Starzinger > Reviewed-by: Yang Guo > Reviewed-by: Deepti Gandluri > Reviewed-by: Ulan Degenbaev > Reviewed-by: Michael Lippautz > Cr-Commit-Position: refs/heads/master@{#62572} Bug: v8:9380, v8:9221 Change-Id: If3f72967a8ebeb067c0edcfc16ed631e36829dbc Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1691906 Commit-Queue: Ben Titzer Reviewed-by: Michael Starzinger Reviewed-by: Michael Lippautz Reviewed-by: Deepti Gandluri Reviewed-by: Yang Guo Reviewed-by: Ulan Degenbaev Cr-Commit-Position: refs/heads/master@{#62809} --- BUILD.gn | 4 +- include/v8.h | 4 +- src/api/api.cc | 303 ++++++--- src/asmjs/asm-js.cc | 7 +- src/builtins/builtins-arraybuffer.cc | 37 +- src/d8/d8.cc | 14 + src/d8/d8.h | 2 + src/diagnostics/objects-printer.cc | 1 - src/execution/isolate.cc | 12 +- src/execution/isolate.h | 2 + src/execution/stack-guard.cc | 4 +- src/extensions/free-buffer-extension.cc | 5 +- src/heap/array-buffer-collector.cc | 25 +- src/heap/array-buffer-collector.h | 4 +- src/heap/array-buffer-tracker-inl.h | 104 ++- src/heap/array-buffer-tracker.cc | 30 +- src/heap/array-buffer-tracker.h | 25 +- src/heap/factory.cc | 62 +- src/heap/factory.h | 12 +- src/heap/heap.cc | 14 +- src/heap/heap.h | 12 +- src/heap/setup-heap-internal.cc | 1 + src/heap/spaces.cc | 6 +- src/objects/backing-store.cc | 598 +++++++++++++++++ src/objects/backing-store.h | 203 ++++++ src/objects/js-array-buffer-inl.h | 26 +- src/objects/js-array-buffer.cc | 218 +++--- src/objects/js-array-buffer.h | 79 +-- src/objects/value-serializer.cc | 20 +- src/roots/roots.h | 4 +- src/runtime/runtime-test.cc | 25 +- src/runtime/runtime-typedarray.cc | 15 - src/snapshot/deserializer.cc | 28 +- src/snapshot/deserializer.h | 5 +- src/wasm/c-api.cc | 5 +- src/wasm/module-compiler.cc | 1 - src/wasm/module-instantiate.cc | 201 +++--- src/wasm/wasm-code-manager.cc | 12 +- src/wasm/wasm-code-manager.h | 6 
+- src/wasm/wasm-engine.cc | 3 +- src/wasm/wasm-engine.h | 5 +- src/wasm/wasm-js.cc | 13 +- src/wasm/wasm-memory.cc | 633 ------------------ src/wasm/wasm-memory.h | 289 -------- src/wasm/wasm-module.cc | 18 +- src/wasm/wasm-objects.cc | 217 +++--- src/wasm/wasm-objects.h | 9 +- test/cctest/BUILD.gn | 3 + test/cctest/cctest.status | 2 + test/cctest/heap/test-array-buffer-tracker.cc | 4 +- test/cctest/heap/test-page-promotion.cc | 12 +- test/cctest/manually-externalized-buffer.h | 34 + test/cctest/test-api-array-buffer.cc | 10 +- test/cctest/test-backing-store.cc | 85 +++ test/cctest/test-code-stub-assembler.cc | 4 +- test/cctest/test-heap-profiler.cc | 3 +- test/cctest/test-roots.cc | 1 + test/cctest/wasm/test-grow-memory.cc | 131 ++++ test/cctest/wasm/test-run-wasm-module.cc | 149 ----- test/cctest/wasm/test-wasm-serialization.cc | 1 - test/cctest/wasm/wasm-run-utils.cc | 31 +- test/mjsunit/wasm/gc-memory.js | 41 ++ .../compiler/js-typed-lowering-unittest.cc | 7 - .../objects/value-serializer-unittest.cc | 32 +- .../wasm/trap-handler-x64-unittest.cc | 35 +- .../wasm/wasm-code-manager-unittest.cc | 1 - 66 files changed, 2020 insertions(+), 1889 deletions(-) create mode 100644 src/objects/backing-store.cc create mode 100644 src/objects/backing-store.h delete mode 100644 src/wasm/wasm-memory.cc delete mode 100644 src/wasm/wasm-memory.h create mode 100644 test/cctest/manually-externalized-buffer.h create mode 100644 test/cctest/test-backing-store.cc create mode 100644 test/cctest/wasm/test-grow-memory.cc create mode 100644 test/mjsunit/wasm/gc-memory.js diff --git a/BUILD.gn b/BUILD.gn index 9f1440e9e6..faed0599da 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -2424,6 +2424,8 @@ v8_source_set("v8_base_without_compiler") { "src/objects/api-callbacks.h", "src/objects/arguments-inl.h", "src/objects/arguments.h", + "src/objects/backing-store.cc", + "src/objects/backing-store.h", "src/objects/bigint.cc", "src/objects/bigint.h", "src/objects/cell-inl.h", @@ -2917,8 +2919,6 @@ v8_source_set("v8_base_without_compiler") { "src/wasm/wasm-js.h", "src/wasm/wasm-limits.h", "src/wasm/wasm-linkage.h", - "src/wasm/wasm-memory.cc", - "src/wasm/wasm-memory.h", "src/wasm/wasm-module-builder.cc", "src/wasm/wasm-module-builder.h", "src/wasm/wasm-module.cc", diff --git a/include/v8.h b/include/v8.h index 1cb2ba1e68..8562f56f5a 100644 --- a/include/v8.h +++ b/include/v8.h @@ -4785,7 +4785,7 @@ class V8_EXPORT ArrayBuffer : public Object { * * The Data pointer of ArrayBuffer::Contents must be freed using the provided * deleter, which will call ArrayBuffer::Allocator::Free if the buffer - * was allocated with ArraryBuffer::Allocator::Allocate. + * was allocated with ArrayBuffer::Allocator::Allocate. */ Contents Externalize(); @@ -4807,6 +4807,7 @@ class V8_EXPORT ArrayBuffer : public Object { private: ArrayBuffer(); static void CheckCast(Value* obj); + Contents GetContents(bool externalize); }; @@ -5224,6 +5225,7 @@ class V8_EXPORT SharedArrayBuffer : public Object { private: SharedArrayBuffer(); static void CheckCast(Value* obj); + Contents GetContents(bool externalize); }; diff --git a/src/api/api.cc b/src/api/api.cc index e55ff8a911..edadee92f1 100644 --- a/src/api/api.cc +++ b/src/api/api.cc @@ -126,6 +126,8 @@ #endif // V8_TARGET_ARCH_X64 #endif // V8_OS_WIN +#define TRACE_BS(...) 
/* PrintF(__VA_ARGS__) */ + namespace v8 { /* @@ -7158,20 +7160,75 @@ bool v8::ArrayBuffer::IsDetachable() const { return Utils::OpenHandle(this)->is_detachable(); } -v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { - i::Handle self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", - "ArrayBuffer already externalized"); - self->set_is_external(true); - - const v8::ArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); - - // A regular copy is good enough. No move semantics needed. - return contents; +namespace { +// The backing store deleter just deletes the indirection, which downrefs +// the shared pointer. It will get collected normally. +void BackingStoreDeleter(void* buffer, size_t length, void* info) { + auto bs_indirection = + reinterpret_cast*>(info); + if (bs_indirection) { + auto backing_store = bs_indirection->get(); + TRACE_BS("API:delete bs=%p mem=%p (%zu bytes)\n", backing_store, + backing_store->buffer_start(), backing_store->byte_length()); + USE(backing_store); + } + delete bs_indirection; } +void* MakeDeleterData(std::shared_ptr backing_store) { + if (!backing_store) return nullptr; + TRACE_BS("API:extern bs=%p mem=%p (%zu bytes)\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length()); + return new std::shared_ptr(backing_store); +} + +std::shared_ptr LookupOrCreateBackingStore( + i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared, + ArrayBufferCreationMode mode) { + // "internalized" means that the storage was allocated by the + // ArrayBufferAllocator and thus should be freed upon destruction. + bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized; + + // Try to lookup a previously-registered backing store in the global + // registry. If found, use that instead of wrapping an embedder allocation. + std::shared_ptr backing_store = + i::GlobalBackingStoreRegistry::Lookup(data, byte_length); + + if (backing_store) { + // Check invariants for a previously-found backing store. + + // 1. We cannot allow an embedder to first allocate a backing store that + // should not be freed upon destruct, and then allocate an alias that should + // destruct it. The other order is fine. + bool changing_destruct_mode = + free_on_destruct && !backing_store->free_on_destruct(); + Utils::ApiCheck( + !changing_destruct_mode, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that should not be freed on destruct"); + + // 2. We cannot allow embedders to use the same backing store for both + // SharedArrayBuffers and regular ArrayBuffers. + bool changing_shared_flag = + (shared == i::SharedFlag::kShared) != backing_store->is_shared(); + Utils::ApiCheck( + !changing_shared_flag, "v8_[Shared]ArrayBuffer_New", + "previous backing store found that does not match shared flag"); + } else { + // No previous backing store found. + backing_store = i::BackingStore::WrapAllocation( + i_isolate, data, byte_length, shared, free_on_destruct); + + if (free_on_destruct) { + // The embedder requested free-on-destruct. They already have a + // direct pointer to the buffer start, so globally register the backing + // store in case they come back with the same buffer start. 
+ i::GlobalBackingStoreRegistry::Register(backing_store); + } + } + return backing_store; +} +} // namespace + v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, void* allocation_base, size_t allocation_length, @@ -7188,29 +7245,61 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length, DCHECK_LE(byte_length_, allocation_length_); } -void WasmMemoryDeleter(void* buffer, size_t lenght, void* info) { - internal::wasm::WasmEngine* engine = - reinterpret_cast(info); - CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer)); -} - -void ArrayBufferDeleter(void* buffer, size_t length, void* info) { - v8::ArrayBuffer::Allocator* allocator = - reinterpret_cast(info); - allocator->Free(buffer, length); +v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() { + return GetContents(true); } v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::ArrayBuffer; + i::Handle self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() ? Allocator::AllocationMode::kReservation - : Allocator::AllocationMode::kNormal, - self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter, - self->is_wasm_memory() - ? static_cast(self->GetIsolate()->wasm_engine()) - : static_cast(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize", + "ArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. + DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? 
v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7235,14 +7324,18 @@ Local v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) { i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) { + i::MaybeHandle result = + i_isolate->factory()->NewJSArrayBufferAndBackingStore( + byte_length, i::InitializedFlag::kZeroInitialized); + + i::Handle array_buffer; + if (!result.ToHandle(&array_buffer)) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New"); } - return Utils::ToLocal(obj); + + return Utils::ToLocal(array_buffer); } Local v8::ArrayBuffer::New(Isolate* isolate, void* data, @@ -7254,11 +7347,15 @@ Local v8::ArrayBuffer::New(Isolate* isolate, void* data, i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, ArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared); - i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length); + + std::shared_ptr backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode); + + i::Handle obj = i_isolate->factory()->NewJSArrayBuffer(); + obj->Attach(std::move(backing_store)); + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } return Utils::ToLocal(obj); } @@ -7301,9 +7398,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) { bool v8::ArrayBufferView::HasBuffer() const { i::Handle self = Utils::OpenHandle(this); - i::Handle buffer(i::JSArrayBuffer::cast(self->buffer()), - self->GetIsolate()); - return buffer->backing_store() != nullptr; + if (!self->IsJSTypedArray()) return true; + auto typed_array = i::Handle::cast(self); + return !typed_array->is_on_heap(); } size_t v8::ArrayBufferView::ByteOffset() { @@ -7399,13 +7496,17 @@ i::Handle SetupSharedArrayBuffer( i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); + + std::shared_ptr backing_store = LookupOrCreateBackingStore( + i_isolate, data, byte_length, i::SharedFlag::kShared, mode); + i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - bool is_wasm_memory = - i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data); - i::JSArrayBuffer::Setup(obj, i_isolate, - mode == ArrayBufferCreationMode::kExternalized, data, - byte_length, i::SharedFlag::kShared, is_wasm_memory); + i_isolate->factory()->NewJSSharedArrayBuffer(); + + obj->Attach(backing_store); + if (mode == ArrayBufferCreationMode::kExternalized) { + obj->set_is_external(true); + } return obj; } @@ 
-7415,20 +7516,6 @@ bool v8::SharedArrayBuffer::IsExternal() const { return Utils::OpenHandle(this)->is_external(); } -v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { - i::Handle self = Utils::OpenHandle(this); - i::Isolate* isolate = self->GetIsolate(); - Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", - "SharedArrayBuffer already externalized"); - self->set_is_external(true); - - const v8::SharedArrayBuffer::Contents contents = GetContents(); - isolate->heap()->UnregisterArrayBuffer(*self); - - // A regular copy is good enough. No move semantics needed. - return contents; -} - v8::SharedArrayBuffer::Contents::Contents( void* data, size_t byte_length, void* allocation_base, size_t allocation_length, Allocator::AllocationMode allocation_mode, @@ -7444,20 +7531,62 @@ v8::SharedArrayBuffer::Contents::Contents( DCHECK_LE(byte_length_, allocation_length_); } +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() { + return GetContents(true); +} + v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() { + return GetContents(false); +} + +v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents( + bool externalize) { + // TODO(titzer): reduce duplication between shared/unshared GetContents() + using BufferType = v8::SharedArrayBuffer; + i::Handle self = Utils::OpenHandle(this); - Contents contents( - self->backing_store(), self->byte_length(), self->allocation_base(), - self->allocation_length(), - self->is_wasm_memory() - ? ArrayBuffer::Allocator::AllocationMode::kReservation - : ArrayBuffer::Allocator::AllocationMode::kNormal, - self->is_wasm_memory() - ? reinterpret_cast(WasmMemoryDeleter) - : reinterpret_cast(ArrayBufferDeleter), - self->is_wasm_memory() - ? static_cast(self->GetIsolate()->wasm_engine()) - : static_cast(self->GetIsolate()->array_buffer_allocator())); + + std::shared_ptr backing_store = self->GetBackingStore(); + + void* deleter_data = nullptr; + if (externalize) { + Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize", + "SharedArrayBuffer already externalized"); + self->set_is_external(true); + // When externalizing, upref the shared pointer to the backing store + // and store that as the deleter data. When the embedder calls the deleter + // callback, we will delete the additional (on-heap) shared_ptr. + deleter_data = MakeDeleterData(backing_store); + } + + if (!backing_store) { + // If the array buffer has zero length or was detached, return empty + // contents. + DCHECK_EQ(0, self->byte_length()); + BufferType::Contents contents( + nullptr, 0, nullptr, 0, + v8::ArrayBuffer::Allocator::AllocationMode::kNormal, + BackingStoreDeleter, deleter_data); + return contents; + } + + // Backing stores that given to the embedder might be passed back through + // the API using only the start of the buffer. We need to find such + // backing stores using global registration until the API is changed. + i::GlobalBackingStoreRegistry::Register(backing_store); + + auto allocation_mode = + backing_store->is_wasm_memory() + ? 
v8::ArrayBuffer::Allocator::AllocationMode::kReservation + : v8::ArrayBuffer::Allocator::AllocationMode::kNormal; + + BufferType::Contents contents(backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + backing_store->buffer_start(), // -- + backing_store->byte_length(), // -- + allocation_mode, // -- + BackingStoreDeleter, // -- + deleter_data); return contents; } @@ -7472,14 +7601,20 @@ Local v8::SharedArrayBuffer::New(Isolate* isolate, i::Isolate* i_isolate = reinterpret_cast(isolate); LOG_API(i_isolate, SharedArrayBuffer, New); ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate); - i::Handle obj = - i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared); - // TODO(jbroman): It may be useful in the future to provide a MaybeLocal - // version that throws an exception or otherwise does not crash. - if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true, - i::SharedFlag::kShared)) { + + std::unique_ptr backing_store = + i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared, + i::InitializedFlag::kZeroInitialized); + + if (!backing_store) { + // TODO(jbroman): It may be useful in the future to provide a MaybeLocal + // version that throws an exception or otherwise does not crash. i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New"); } + + i::Handle obj = + i_isolate->factory()->NewJSSharedArrayBuffer(); + obj->Attach(std::move(backing_store)); return Utils::ToLocalShared(obj); } @@ -10445,3 +10580,5 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo& info, } // namespace internal } // namespace v8 + +#undef TRACE_BS diff --git a/src/asmjs/asm-js.cc b/src/asmjs/asm-js.cc index 164e02d819..9f158a6ef4 100644 --- a/src/asmjs/asm-js.cc +++ b/src/asmjs/asm-js.cc @@ -387,7 +387,12 @@ MaybeHandle AsmJs::InstantiateAsmWasm(Isolate* isolate, ReportInstantiationFailure(script, position, "Requires heap buffer"); return MaybeHandle(); } - wasm_engine->memory_tracker()->MarkWasmMemoryNotGrowable(memory); + // Mark the buffer as being used as an asm.js memory. This implies two + // things: 1) if the buffer is from a Wasm memory, that memory can no longer + // be grown, since that would detach this buffer, and 2) the buffer cannot + // be postMessage()'d, as that also detaches the buffer. + memory->set_is_asmjs_memory(true); + memory->set_is_detachable(false); size_t size = memory->byte_length(); // Check the asm.js heap size against the valid limits. if (!IsValidAsmjsMemorySize(size)) { diff --git a/src/builtins/builtins-arraybuffer.cc b/src/builtins/builtins-arraybuffer.cc index 9ecb1815bc..44f272e7c8 100644 --- a/src/builtins/builtins-arraybuffer.cc +++ b/src/builtins/builtins-arraybuffer.cc @@ -30,29 +30,36 @@ namespace { Object ConstructBuffer(Isolate* isolate, Handle target, Handle new_target, Handle length, - bool initialize) { + InitializedFlag initialized) { Handle result; ASSIGN_RETURN_FAILURE_ON_EXCEPTION( isolate, result, JSObject::New(target, new_target, Handle::null())); + auto array_buffer = Handle::cast(result); + SharedFlag shared = (*target != target->native_context().array_buffer_fun()) + ? SharedFlag::kShared + : SharedFlag::kNotShared; + size_t byte_length; if (!TryNumberToSize(*length, &byte_length) || byte_length > JSArrayBuffer::kMaxByteLength) { - JSArrayBuffer::SetupAsEmpty(Handle::cast(result), isolate); + // ToNumber failed. 
+ array_buffer->SetupEmpty(shared); THROW_NEW_ERROR_RETURN_FAILURE( isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - SharedFlag shared_flag = - (*target == target->native_context().array_buffer_fun()) - ? SharedFlag::kNotShared - : SharedFlag::kShared; - if (!JSArrayBuffer::SetupAllocatingData(Handle::cast(result), - isolate, byte_length, initialize, - shared_flag)) { - THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed)); + + auto backing_store = + BackingStore::Allocate(isolate, byte_length, shared, initialized); + if (backing_store) { + array_buffer->Attach(std::move(backing_store)); + return *array_buffer; } - return *result; + + // Allocation of backing store failed. + array_buffer->SetupEmpty(shared); + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed)); } } // namespace @@ -80,7 +87,8 @@ BUILTIN(ArrayBufferConstructor) { isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength)); } - return ConstructBuffer(isolate, target, new_target, number_length, true); + return ConstructBuffer(isolate, target, new_target, number_length, + InitializedFlag::kZeroInitialized); } // This is a helper to construct an ArrayBuffer with uinitialized memory. @@ -91,7 +99,8 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) { Handle target(isolate->native_context()->array_buffer_fun(), isolate); Handle length = args.atOrUndefined(isolate, 1); - return ConstructBuffer(isolate, target, target, length, false); + return ConstructBuffer(isolate, target, target, length, + InitializedFlag::kUninitialized); } // ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength diff --git a/src/d8/d8.cc b/src/d8/d8.cc index bf03573f78..dabd5290b0 100644 --- a/src/d8/d8.cc +++ b/src/d8/d8.cc @@ -70,6 +70,8 @@ #define CHECK(condition) assert(condition) #endif +#define TRACE_BS(...) 
/* PrintF(__VA_ARGS__) */ + namespace v8 { namespace { @@ -3054,6 +3056,17 @@ class Serializer : public ValueSerializer::Delegate { std::unique_ptr Release() { return std::move(data_); } void AppendExternalizedContentsTo(std::vector* to) { + for (auto& contents : externalized_contents_) { + auto bs_indirection = reinterpret_cast*>( + contents.DeleterData()); + if (bs_indirection) { + auto backing_store = bs_indirection->get(); + TRACE_BS("d8:append bs=%p mem=%p (%zu bytes)\n", backing_store, + backing_store->buffer_start(), backing_store->byte_length()); + USE(backing_store); + } + } + to->insert(to->end(), std::make_move_iterator(externalized_contents_.begin()), std::make_move_iterator(externalized_contents_.end())); @@ -3534,3 +3547,4 @@ int main(int argc, char* argv[]) { return v8::Shell::Main(argc, argv); } #undef CHECK #undef DCHECK +#undef TRACE_BS diff --git a/src/d8/d8.h b/src/d8/d8.h index 1e0dd43c2d..3fa09ffde8 100644 --- a/src/d8/d8.h +++ b/src/d8/d8.h @@ -149,6 +149,8 @@ class ExternalizedContents { } ~ExternalizedContents(); + void* DeleterData() { return deleter_data_; } + private: void* data_; size_t length_; diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 5284208285..d8745741d6 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1378,7 +1378,6 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT if (is_detachable()) os << "\n - detachable"; if (was_detached()) os << "\n - detached"; if (is_shared()) os << "\n - shared"; - if (is_wasm_memory()) os << "\n - is_wasm_memory"; JSObjectPrintBody(os, *this, !was_detached()); } diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index 2b3551cdfb..8e68a8ffc4 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -51,6 +51,7 @@ #include "src/logging/counters.h" #include "src/logging/log.h" #include "src/numbers/hash-seed-inl.h" +#include "src/objects/backing-store.h" #include "src/objects/elements.h" #include "src/objects/frame-array-inl.h" #include "src/objects/hash-table-inl.h" @@ -2981,7 +2982,7 @@ void Isolate::Deinit() { optimizing_compile_dispatcher_ = nullptr; } - wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this); + BackingStore::RemoveSharedWasmMemoryObjects(this); heap_.mark_compact_collector()->EnsureSweepingCompleted(); heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted(); @@ -4574,6 +4575,15 @@ void Isolate::AddDetachedContext(Handle context) { heap()->set_detached_contexts(*detached_contexts); } +void Isolate::AddSharedWasmMemory(Handle memory_object) { + HandleScope scope(this); + Handle shared_wasm_memories = + factory()->shared_wasm_memories(); + shared_wasm_memories = WeakArrayList::AddToEnd( + this, shared_wasm_memories, MaybeObjectHandle::Weak(memory_object)); + heap()->set_shared_wasm_memories(*shared_wasm_memories); +} + void Isolate::CheckDetachedContextsAfterGC() { HandleScope scope(this); Handle detached_contexts = factory()->detached_contexts(); diff --git a/src/execution/isolate.h b/src/execution/isolate.h index 2ead7bf844..7ed686da3e 100644 --- a/src/execution/isolate.h +++ b/src/execution/isolate.h @@ -1408,6 +1408,8 @@ class Isolate final : private HiddenFactory { void AddDetachedContext(Handle context); void CheckDetachedContextsAfterGC(); + void AddSharedWasmMemory(Handle memory_object); + std::vector* partial_snapshot_cache() { return &partial_snapshot_cache_; } diff --git a/src/execution/stack-guard.cc b/src/execution/stack-guard.cc 
index e5c24cef1e..61a1d10521 100644 --- a/src/execution/stack-guard.cc +++ b/src/execution/stack-guard.cc @@ -10,6 +10,7 @@ #include "src/execution/runtime-profiler.h" #include "src/execution/simulator.h" #include "src/logging/counters.h" +#include "src/objects/backing-store.h" #include "src/roots/roots-inl.h" #include "src/utils/memcopy.h" #include "src/wasm/wasm-engine.h" @@ -301,8 +302,7 @@ Object StackGuard::HandleInterrupts() { if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "V8.WasmGrowSharedMemory"); - isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances( - isolate_); + BackingStore::UpdateSharedWasmMemoryObjects(isolate_); } if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) { diff --git a/src/extensions/free-buffer-extension.cc b/src/extensions/free-buffer-extension.cc index 975e9543c8..9fdfe920eb 100644 --- a/src/extensions/free-buffer-extension.cc +++ b/src/extensions/free-buffer-extension.cc @@ -21,9 +21,8 @@ void FreeBufferExtension::FreeBuffer( const v8::FunctionCallbackInfo& args) { v8::Local arrayBuffer = args[0].As(); v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize(); - Isolate* isolate = reinterpret_cast(args.GetIsolate()); - isolate->array_buffer_allocator()->Free(contents.Data(), - contents.ByteLength()); + contents.Deleter()(contents.Data(), contents.ByteLength(), + contents.DeleterData()); } } // namespace internal diff --git a/src/heap/array-buffer-collector.cc b/src/heap/array-buffer-collector.cc index b6d7df8191..672d5e68f0 100644 --- a/src/heap/array-buffer-collector.cc +++ b/src/heap/array-buffer-collector.cc @@ -14,33 +14,22 @@ namespace v8 { namespace internal { -namespace { - -void FreeAllocationsHelper( - Heap* heap, const std::vector& allocations) { - for (JSArrayBuffer::Allocation alloc : allocations) { - JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc); - } -} - -} // namespace - void ArrayBufferCollector::QueueOrFreeGarbageAllocations( - std::vector allocations) { + std::vector> backing_stores) { if (heap_->ShouldReduceMemory()) { - FreeAllocationsHelper(heap_, allocations); + // Destruct the vector, which destructs the std::shared_ptrs, freeing + // the backing stores. + backing_stores.clear(); } else { base::MutexGuard guard(&allocations_mutex_); - allocations_.push_back(std::move(allocations)); + allocations_.push_back(std::move(backing_stores)); } } void ArrayBufferCollector::PerformFreeAllocations() { base::MutexGuard guard(&allocations_mutex_); - for (const std::vector& allocations : - allocations_) { - FreeAllocationsHelper(heap_, allocations); - } + // Destruct the vector, which destructs the vecotr of std::shared_ptrs, + // freeing the backing stores if their refcount drops to zero. allocations_.clear(); } diff --git a/src/heap/array-buffer-collector.h b/src/heap/array-buffer-collector.h index 784092e936..2d060cc595 100644 --- a/src/heap/array-buffer-collector.h +++ b/src/heap/array-buffer-collector.h @@ -31,7 +31,7 @@ class ArrayBufferCollector { // // FreeAllocations() potentially triggers a background task for processing. void QueueOrFreeGarbageAllocations( - std::vector allocations); + std::vector> allocations); // Calls FreeAllocations() on a background thread. 
void FreeAllocations(); @@ -45,7 +45,7 @@ class ArrayBufferCollector { Heap* const heap_; base::Mutex allocations_mutex_; - std::vector> allocations_; + std::vector>> allocations_; }; } // namespace internal diff --git a/src/heap/array-buffer-tracker-inl.h b/src/heap/array-buffer-tracker-inl.h index 65d3f4a732..c267d856e2 100644 --- a/src/heap/array-buffer-tracker-inl.h +++ b/src/heap/array-buffer-tracker-inl.h @@ -12,16 +12,28 @@ #include "src/objects/js-array-buffer-inl.h" #include "src/objects/objects.h" +#define TRACE_BS(...) /* PrintF(__VA_ARGS__) */ + namespace v8 { namespace internal { -void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) { - if (buffer.backing_store() == nullptr) return; +inline size_t PerIsolateAccountingLength(JSArrayBuffer buffer) { + // TODO(titzer): SharedArrayBuffers and shared WasmMemorys cause problems with + // accounting for per-isolate external memory. In particular, sharing the same + // array buffer or memory multiple times, which happens in stress tests, can + // cause overcounting, leading to GC thrashing. Fix with global accounting? + return buffer.is_shared() ? 0 : buffer.byte_length(); +} + +void ArrayBufferTracker::RegisterNew( + Heap* heap, JSArrayBuffer buffer, + std::shared_ptr backing_store) { + if (!backing_store) return; // ArrayBuffer tracking works only for small objects. DCHECK(!heap->IsLargeObject(buffer)); + DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store()); - const size_t length = buffer.byte_length(); Page* page = Page::FromHeapObject(buffer); { base::MutexGuard guard(page->mutex()); @@ -31,44 +43,63 @@ void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) { tracker = page->local_tracker(); } DCHECK_NOT_NULL(tracker); - tracker->Add(buffer, length); + TRACE_BS("ABT:reg bs=%p mem=%p (%zu bytes) cnt=%zu\n", + backing_store.get(), backing_store->buffer_start(), + backing_store->byte_length(), backing_store.use_count()); + tracker->Add(buffer, std::move(backing_store)); } // TODO(wez): Remove backing-store from external memory accounting. // We may go over the limit of externally allocated memory here. We call the // api function to trigger a GC in this case. + const size_t length = PerIsolateAccountingLength(buffer); reinterpret_cast(heap->isolate()) ->AdjustAmountOfExternalAllocatedMemory(length); } -void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) { - if (buffer.backing_store() == nullptr) return; +std::shared_ptr ArrayBufferTracker::Unregister( + Heap* heap, JSArrayBuffer buffer) { + std::shared_ptr backing_store; + const size_t length = PerIsolateAccountingLength(buffer); Page* page = Page::FromHeapObject(buffer); - const size_t length = buffer.byte_length(); { base::MutexGuard guard(page->mutex()); LocalArrayBufferTracker* tracker = page->local_tracker(); DCHECK_NOT_NULL(tracker); - tracker->Remove(buffer, length); + backing_store = tracker->Remove(buffer); } // TODO(wez): Remove backing-store from external memory accounting. 
heap->update_external_memory(-static_cast(length)); + return backing_store; +} + +std::shared_ptr ArrayBufferTracker::Lookup(Heap* heap, + JSArrayBuffer buffer) { + if (buffer.backing_store() == nullptr) return {}; + + Page* page = Page::FromHeapObject(buffer); + base::MutexGuard guard(page->mutex()); + LocalArrayBufferTracker* tracker = page->local_tracker(); + DCHECK_NOT_NULL(tracker); + return tracker->Lookup(buffer); } template void LocalArrayBufferTracker::Free(Callback should_free) { size_t freed_memory = 0; - Isolate* isolate = page_->heap()->isolate(); for (TrackingData::iterator it = array_buffers_.begin(); it != array_buffers_.end();) { // Unchecked cast because the map might already be dead at this point. JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first); - const size_t length = it->second.length; + const size_t length = PerIsolateAccountingLength(buffer); if (should_free(buffer)) { - JSArrayBuffer::FreeBackingStore(isolate, it->second); + // Destroy the shared pointer, (perhaps) freeing the backing store. + TRACE_BS("ABT:die bs=%p mem=%p (%zu bytes) cnt=%zu\n", it->second.get(), + it->second->buffer_start(), it->second->byte_length(), + it->second.use_count()); it = array_buffers_.erase(it); freed_memory += length; } else { @@ -98,35 +129,60 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) { } } -void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) { +void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, + std::shared_ptr backing_store) { + auto length = PerIsolateAccountingLength(buffer); page_->IncrementExternalBackingStoreBytes( ExternalBackingStoreType::kArrayBuffer, length); - AddInternal(buffer, length); + AddInternal(buffer, std::move(backing_store)); } -void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) { - auto ret = array_buffers_.insert( - {buffer, - {buffer.backing_store(), length, buffer.backing_store(), - buffer.is_wasm_memory()}}); +void LocalArrayBufferTracker::AddInternal( + JSArrayBuffer buffer, std::shared_ptr backing_store) { + auto ret = array_buffers_.insert({buffer, std::move(backing_store)}); USE(ret); // Check that we indeed inserted a new value and did not overwrite an existing // one (which would be a bug). DCHECK(ret.second); } -void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) { +std::shared_ptr LocalArrayBufferTracker::Remove( + JSArrayBuffer buffer) { + TrackingData::iterator it = array_buffers_.find(buffer); + + // Check that we indeed find a key to remove. + DCHECK(it != array_buffers_.end()); + + // Steal the underlying shared pointer before erasing the entry. + std::shared_ptr backing_store = std::move(it->second); + + TRACE_BS("ABT:rm bs=%p mem=%p (%zu bytes) cnt=%zu\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length(), + backing_store.use_count()); + + // Erase the entry. + array_buffers_.erase(it); + + // Update accounting. + auto length = PerIsolateAccountingLength(buffer); page_->DecrementExternalBackingStoreBytes( ExternalBackingStoreType::kArrayBuffer, length); - TrackingData::iterator it = array_buffers_.find(buffer); - // Check that we indeed find a key to remove. 
- DCHECK(it != array_buffers_.end()); - DCHECK_EQ(length, it->second.length); - array_buffers_.erase(it); + return backing_store; } +std::shared_ptr LocalArrayBufferTracker::Lookup( + JSArrayBuffer buffer) { + TrackingData::iterator it = array_buffers_.find(buffer); + if (it != array_buffers_.end()) { + return it->second; + } + return {}; +} + +#undef TRACE_BS + } // namespace internal } // namespace v8 diff --git a/src/heap/array-buffer-tracker.cc b/src/heap/array-buffer-tracker.cc index 0c04d7b6ae..dba3bbda0c 100644 --- a/src/heap/array-buffer-tracker.cc +++ b/src/heap/array-buffer-tracker.cc @@ -11,6 +11,8 @@ #include "src/heap/heap.h" #include "src/heap/spaces.h" +#define TRACE_BS(...) /* PrintF(__VA_ARGS__) */ + namespace v8 { namespace internal { @@ -20,7 +22,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() { template void LocalArrayBufferTracker::Process(Callback callback) { - std::vector backing_stores_to_free; + std::vector> backing_stores_to_free; TrackingData kept_array_buffers; JSArrayBuffer new_buffer; @@ -32,8 +34,9 @@ void LocalArrayBufferTracker::Process(Callback callback) { DCHECK_EQ(page_, Page::FromHeapObject(old_buffer)); const CallbackResult result = callback(old_buffer, &new_buffer); if (result == kKeepEntry) { - kept_array_buffers.insert(*it); + kept_array_buffers.insert(std::move(*it)); } else if (result == kUpdateEntry) { + DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length()); DCHECK(!new_buffer.is_null()); Page* target_page = Page::FromHeapObject(new_buffer); { @@ -44,22 +47,28 @@ void LocalArrayBufferTracker::Process(Callback callback) { tracker = target_page->local_tracker(); } DCHECK_NOT_NULL(tracker); - const size_t length = it->second.length; + const size_t length = PerIsolateAccountingLength(old_buffer); // We should decrement before adding to avoid potential overflows in // the external memory counters. - DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory); - tracker->AddInternal(new_buffer, length); + tracker->AddInternal(new_buffer, std::move(it->second)); MemoryChunk::MoveExternalBackingStoreBytes( ExternalBackingStoreType::kArrayBuffer, static_cast(page_), static_cast(target_page), length); } } else if (result == kRemoveEntry) { - freed_memory += it->second.length; - // We pass backing_store() and stored length to the collector for freeing - // the backing store. Wasm allocations will go through their own tracker - // based on the backing store. - backing_stores_to_free.push_back(it->second); + freed_memory += PerIsolateAccountingLength(old_buffer); + auto backing_store = std::move(it->second); + TRACE_BS("ABT:queue bs=%p mem=%p (%zu bytes) cnt=%zu\n", + backing_store.get(), backing_store->buffer_start(), + backing_store->byte_length(), backing_store.use_count()); + if (!backing_store->is_shared()) { + // Only retain non-shared backing stores. For shared backing stores, + // drop the shared_ptr right away, since this should be cheap, + // as it only updates a refcount, except that last, which will + // destruct it, which is rare. 
+ backing_stores_to_free.push_back(backing_store); + } } else { UNREACHABLE(); } @@ -148,3 +157,4 @@ void ArrayBufferTracker::TearDown(Heap* heap) { } // namespace internal } // namespace v8 +#undef TRACE_BS diff --git a/src/heap/array-buffer-tracker.h b/src/heap/array-buffer-tracker.h index b7950c2506..156c226406 100644 --- a/src/heap/array-buffer-tracker.h +++ b/src/heap/array-buffer-tracker.h @@ -9,6 +9,7 @@ #include "src/base/platform/mutex.h" #include "src/common/globals.h" +#include "src/objects/backing-store.h" #include "src/objects/js-array-buffer.h" #include "src/utils/allocation.h" @@ -31,8 +32,12 @@ class ArrayBufferTracker : public AllStatic { // Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all // access to the tracker by taking the page lock for the corresponding page. - inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer); - inline static void Unregister(Heap* heap, JSArrayBuffer buffer); + inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer, + std::shared_ptr); + inline static std::shared_ptr Unregister(Heap* heap, + JSArrayBuffer buffer); + inline static std::shared_ptr Lookup(Heap* heap, + JSArrayBuffer buffer); // Identifies all backing store pointers for dead JSArrayBuffers in new space. // Does not take any locks and can only be called during Scavenge. @@ -70,8 +75,10 @@ class LocalArrayBufferTracker { explicit LocalArrayBufferTracker(Page* page) : page_(page) {} ~LocalArrayBufferTracker(); - inline void Add(JSArrayBuffer buffer, size_t length); - inline void Remove(JSArrayBuffer buffer, size_t length); + inline void Add(JSArrayBuffer buffer, + std::shared_ptr backing_store); + inline std::shared_ptr Remove(JSArrayBuffer buffer); + inline std::shared_ptr Lookup(JSArrayBuffer buffer); // Frees up array buffers. // @@ -105,17 +112,13 @@ class LocalArrayBufferTracker { } }; - // Keep track of the backing store and the corresponding length at time of - // registering. The length is accessed from JavaScript and can be a - // HeapNumber. The reason for tracking the length is that in the case of - // length being a HeapNumber, the buffer and its length may be stored on - // different memory pages, making it impossible to guarantee order of freeing. using TrackingData = - std::unordered_map; + std::unordered_map, Hasher>; // Internal version of add that does not update counters. Requires separate // logic for updating external memory counters. - inline void AddInternal(JSArrayBuffer buffer, size_t length); + inline void AddInternal(JSArrayBuffer buffer, + std::shared_ptr backing_store); Page* page_; // The set contains raw heap pointers which are removed by the GC upon diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 19c3665622..110ace8ef4 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -2067,6 +2067,13 @@ void initialize_length(Handle array, int length) { array->initialize_length(length); } +inline void ZeroEmbedderFields(i::Handle obj) { + auto count = obj->GetEmbedderFieldCount(); + for (int i = 0; i < count; i++) { + obj->SetEmbedderField(i, Smi::kZero); + } +} + } // namespace template @@ -3093,15 +3100,46 @@ Handle Factory::NewSyntheticModule( return module; } -Handle Factory::NewJSArrayBuffer(SharedFlag shared, - AllocationType allocation) { - Handle array_buffer_fun( - shared == SharedFlag::kShared - ? 
isolate()->native_context()->shared_array_buffer_fun() - : isolate()->native_context()->array_buffer_fun(), +Handle Factory::NewJSArrayBuffer(AllocationType allocation) { + Handle map(isolate()->native_context()->array_buffer_fun().initial_map(), + isolate()); + auto result = + Handle::cast(NewJSObjectFromMap(map, allocation)); + ZeroEmbedderFields(result); + result->SetupEmpty(SharedFlag::kNotShared); + return result; +} + +MaybeHandle Factory::NewJSArrayBufferAndBackingStore( + size_t byte_length, InitializedFlag initialized, + AllocationType allocation) { + // TODO(titzer): Don't bother allocating a 0-length backing store. + // This is currently required because the embedder API for + // TypedArray::HasBuffer() checks if the backing store is nullptr. + // That check should be changed. + + std::unique_ptr backing_store = BackingStore::Allocate( + isolate(), byte_length, SharedFlag::kNotShared, initialized); + if (!backing_store) return MaybeHandle(); + Handle map(isolate()->native_context()->array_buffer_fun().initial_map(), + isolate()); + auto array_buffer = + Handle::cast(NewJSObjectFromMap(map, allocation)); + array_buffer->Attach(std::move(backing_store)); + ZeroEmbedderFields(array_buffer); + return array_buffer; +} + +Handle Factory::NewJSSharedArrayBuffer( + AllocationType allocation) { + Handle map( + isolate()->native_context()->shared_array_buffer_fun().initial_map(), isolate()); - Handle map(array_buffer_fun->initial_map(), isolate()); - return Handle::cast(NewJSObjectFromMap(map, allocation)); + auto result = + Handle::cast(NewJSObjectFromMap(map, allocation)); + ZeroEmbedderFields(result); + result->SetupEmpty(SharedFlag::kShared); + return result; } Handle Factory::NewJSIteratorResult(Handle value, @@ -3190,9 +3228,7 @@ Handle Factory::NewJSArrayBufferView( array_buffer_view->set_buffer(*buffer); array_buffer_view->set_byte_offset(byte_offset); array_buffer_view->set_byte_length(byte_length); - for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) { - array_buffer_view->SetEmbedderField(i, Smi::kZero); - } + ZeroEmbedderFields(array_buffer_view); DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(), v8::ArrayBufferView::kEmbedderFieldCount); return array_buffer_view; @@ -4148,9 +4184,7 @@ Handle Factory::NewJSPromiseWithoutHook(AllocationType allocation) { NewJSObject(isolate()->promise_function(), allocation)); promise->set_reactions_or_result(Smi::kZero); promise->set_flags(0); - for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) { - promise->SetEmbedderField(i, Smi::kZero); - } + ZeroEmbedderFields(promise); return promise; } diff --git a/src/heap/factory.h b/src/heap/factory.h index 3ccbe6856f..f0f6c30fe4 100644 --- a/src/heap/factory.h +++ b/src/heap/factory.h @@ -75,7 +75,8 @@ class WeakCell; struct SourceRange; template class ZoneVector; -enum class SharedFlag : uint32_t; +enum class SharedFlag : uint8_t; +enum class InitializedFlag : uint8_t; enum FunctionMode { kWithNameBit = 1 << 0, @@ -696,7 +697,14 @@ class V8_EXPORT_PRIVATE Factory { v8::Module::SyntheticModuleEvaluationSteps evaluation_steps); Handle NewJSArrayBuffer( - SharedFlag shared, AllocationType allocation = AllocationType::kYoung); + AllocationType allocation = AllocationType::kYoung); + + MaybeHandle NewJSArrayBufferAndBackingStore( + size_t byte_length, InitializedFlag initialized, + AllocationType allocation = AllocationType::kYoung); + + Handle NewJSSharedArrayBuffer( + AllocationType allocation = AllocationType::kYoung); static void TypeAndSizeForElementsKind(ElementsKind 
kind, ExternalArrayType* array_type, diff --git a/src/heap/heap.cc b/src/heap/heap.cc index 371db912f7..bf0a29d46a 100644 --- a/src/heap/heap.cc +++ b/src/heap/heap.cc @@ -2792,12 +2792,18 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size, return object; } -void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) { - ArrayBufferTracker::RegisterNew(this, buffer); +void Heap::RegisterBackingStore(JSArrayBuffer buffer, + std::shared_ptr backing_store) { + ArrayBufferTracker::RegisterNew(this, buffer, std::move(backing_store)); } -void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) { - ArrayBufferTracker::Unregister(this, buffer); +std::shared_ptr Heap::UnregisterBackingStore( + JSArrayBuffer buffer) { + return ArrayBufferTracker::Unregister(this, buffer); +} + +std::shared_ptr Heap::LookupBackingStore(JSArrayBuffer buffer) { + return ArrayBufferTracker::Lookup(this, buffer); } void Heap::ConfigureInitialOldGenerationSize() { diff --git a/src/heap/heap.h b/src/heap/heap.h index 739f4808ea..e76c2733e9 100644 --- a/src/heap/heap.h +++ b/src/heap/heap.h @@ -45,6 +45,7 @@ class TestMemoryAllocatorScope; } // namespace heap class IncrementalMarking; +class BackingStore; class JSArrayBuffer; using v8::MemoryPressureLevel; @@ -1212,13 +1213,10 @@ class Heap { // =========================================================================== // ArrayBuffer tracking. ===================================================== // =========================================================================== - - // TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external - // in the registration/unregistration APIs. Consider dropping the "New" from - // "RegisterNewArrayBuffer" because one can re-register a previously - // unregistered buffer, too, and the name is confusing. - void RegisterNewArrayBuffer(JSArrayBuffer buffer); - void UnregisterArrayBuffer(JSArrayBuffer buffer); + void RegisterBackingStore(JSArrayBuffer buffer, + std::shared_ptr backing_store); + std::shared_ptr UnregisterBackingStore(JSArrayBuffer buffer); + std::shared_ptr LookupBackingStore(JSArrayBuffer buffer); // =========================================================================== // Allocation site tracking. 
================================================= diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc index a936521a7e..93650b7c0a 100644 --- a/src/heap/setup-heap-internal.cc +++ b/src/heap/setup-heap-internal.cc @@ -784,6 +784,7 @@ void Heap::CreateInitialObjects() { set_feedback_vectors_for_profiling_tools(roots.undefined_value()); set_pending_optimize_for_test_bytecode(roots.undefined_value()); + set_shared_wasm_memories(roots.empty_weak_array_list()); set_script_list(roots.empty_weak_array_list()); diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc index 4c50a337f2..cd6338cb49 100644 --- a/src/heap/spaces.cc +++ b/src/heap/spaces.cc @@ -13,7 +13,7 @@ #include "src/base/platform/semaphore.h" #include "src/base/template-utils.h" #include "src/execution/vm-state-inl.h" -#include "src/heap/array-buffer-tracker.h" +#include "src/heap/array-buffer-tracker-inl.h" #include "src/heap/combined-heap.h" #include "src/heap/concurrent-marking.h" #include "src/heap/gc-tracer.h" @@ -2068,7 +2068,7 @@ void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) { } else if (object.IsJSArrayBuffer()) { JSArrayBuffer array_buffer = JSArrayBuffer::cast(object); if (ArrayBufferTracker::IsTracked(array_buffer)) { - size_t size = array_buffer.byte_length(); + size_t size = PerIsolateAccountingLength(array_buffer); external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size; } } @@ -2557,7 +2557,7 @@ void NewSpace::Verify(Isolate* isolate) { } else if (object.IsJSArrayBuffer()) { JSArrayBuffer array_buffer = JSArrayBuffer::cast(object); if (ArrayBufferTracker::IsTracked(array_buffer)) { - size_t size = array_buffer.byte_length(); + size_t size = PerIsolateAccountingLength(array_buffer); external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size; } } diff --git a/src/objects/backing-store.cc b/src/objects/backing-store.cc new file mode 100644 index 0000000000..43b25ff9fc --- /dev/null +++ b/src/objects/backing-store.cc @@ -0,0 +1,598 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/objects/backing-store.h" +#include "src/execution/isolate.h" +#include "src/handles/global-handles.h" +#include "src/logging/counters.h" +#include "src/wasm/wasm-engine.h" +#include "src/wasm/wasm-limits.h" +#include "src/wasm/wasm-objects-inl.h" + +#define TRACE_BS(...) /* PrintF(__VA_ARGS__); */ + +namespace v8 { +namespace internal { + +namespace { +#if V8_TARGET_ARCH_64_BIT +constexpr bool kUseGuardRegions = true; +#else +constexpr bool kUseGuardRegions = false; +#endif + +#if V8_TARGET_ARCH_MIPS64 +// MIPS64 has a user space of 2^40 bytes on most processors, +// address space limits needs to be smaller. 
+constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB +#elif V8_TARGET_ARCH_64_BIT +constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB +#else +constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB +#endif + +constexpr uint64_t GB = 1024 * 1024 * 1024; +constexpr uint64_t kNegativeGuardSize = 2 * GB; +constexpr uint64_t kFullGuardSize = 10 * GB; + +std::atomic reserved_address_space_{0}; + +// Allocation results are reported to UMA +// +// See wasm_memory_allocation_result in counters.h +enum class AllocationStatus { + kSuccess, // Succeeded on the first try + + kSuccessAfterRetry, // Succeeded after garbage collection + + kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address + // space limit + + kOtherFailure // Failed for an unknown reason +}; + +base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) { + // Guard regions always look like this: + // |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx| + // ^ buffer_start + // ^ byte_length + // ^ negative guard region ^ positive guard region + + Address start = reinterpret_cast
(buffer_start); + DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit + DCHECK_EQ(0, start % AllocatePageSize()); + return base::AddressRegion(start - (2 * GB), + static_cast(kFullGuardSize)); +} + +void RecordStatus(Isolate* isolate, AllocationStatus status) { + isolate->counters()->wasm_memory_allocation_result()->AddSample( + static_cast(status)); +} + +inline void DebugCheckZero(void* start, size_t byte_length) { +#if DEBUG + // Double check memory is zero-initialized. + const byte* bytes = reinterpret_cast(start); + for (size_t i = 0; i < byte_length; i++) { + DCHECK_EQ(0, bytes[i]); + } +#endif +} +} // namespace + +bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) { + uint64_t reservation_limit = kAddressSpaceLimit; + while (true) { + uint64_t old_count = reserved_address_space_.load(); + if (old_count > reservation_limit) return false; + if (reservation_limit - old_count < num_bytes) return false; + if (reserved_address_space_.compare_exchange_weak(old_count, + old_count + num_bytes)) { + return true; + } + } +} + +void BackingStore::ReleaseReservation(uint64_t num_bytes) { + uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes); + USE(old_reserved); + DCHECK_LE(num_bytes, old_reserved); +} + +// The backing store for a Wasm shared memory remembers all the isolates +// with which it has been shared. +struct SharedWasmMemoryData { + std::vector isolates_; +}; + +void BackingStore::Clear() { + buffer_start_ = nullptr; + byte_length_ = 0; + has_guard_regions_ = false; + type_specific_data_.v8_api_array_buffer_allocator = nullptr; +} + +BackingStore::~BackingStore() { + if (globally_registered_) { + GlobalBackingStoreRegistry::Unregister(this); + globally_registered_ = false; + } + + if (buffer_start_ == nullptr) return; // nothing to deallocate + + if (is_wasm_memory_) { + TRACE_BS("BSw:free bs=%p mem=%p (%zu bytes)\n", this, buffer_start_, + byte_capacity_); + if (is_shared_) { + // Deallocate the list of attached memory objects. + SharedWasmMemoryData* shared_data = get_shared_wasm_memory_data(); + delete shared_data; + type_specific_data_.shared_wasm_memory_data = nullptr; + } + + // Wasm memories are always allocated through the page allocator. + auto region = + has_guard_regions_ + ? GetGuardedRegion(buffer_start_, byte_length_) + : base::AddressRegion(reinterpret_cast
(buffer_start_), + byte_capacity_); + bool pages_were_freed = + region.size() == 0 /* no need to free any pages */ || + FreePages(GetPlatformPageAllocator(), + reinterpret_cast(region.begin()), region.size()); + CHECK(pages_were_freed); + BackingStore::ReleaseReservation(has_guard_regions_ ? kFullGuardSize + : byte_capacity_); + Clear(); + return; + } + if (free_on_destruct_) { + // JSArrayBuffer backing store. Deallocate through the embedder's allocator. + auto allocator = reinterpret_cast( + get_v8_api_array_buffer_allocator()); + TRACE_BS("BS:free bs=%p mem=%p (%zu bytes)\n", this, buffer_start_, + byte_capacity_); + allocator->Free(buffer_start_, byte_length_); + } + Clear(); +} + +// Allocate a backing store using the array buffer allocator from the embedder. +std::unique_ptr BackingStore::Allocate( + Isolate* isolate, size_t byte_length, SharedFlag shared, + InitializedFlag initialized) { + void* buffer_start = nullptr; + auto allocator = isolate->array_buffer_allocator(); + CHECK_NOT_NULL(allocator); + if (byte_length != 0) { + auto counters = isolate->counters(); + int mb_length = static_cast(byte_length / MB); + if (mb_length > 0) { + counters->array_buffer_big_allocations()->AddSample(mb_length); + } + if (shared == SharedFlag::kShared) { + counters->shared_array_allocations()->AddSample(mb_length); + } + if (initialized == InitializedFlag::kZeroInitialized) { + buffer_start = allocator->Allocate(byte_length); + if (buffer_start) { + // TODO(wasm): node does not implement the zero-initialization API. + // Reenable this debug check when node does implement it properly. + constexpr bool + kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true; + if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) && + !FLAG_mock_arraybuffer_allocator) { + DebugCheckZero(buffer_start, byte_length); + } + } + } else { + buffer_start = allocator->AllocateUninitialized(byte_length); + } + if (buffer_start == nullptr) { + // Allocation failed. + counters->array_buffer_new_size_failures()->AddSample(mb_length); + return {}; + } + } + + auto result = new BackingStore(buffer_start, // start + byte_length, // length + byte_length, // capacity + shared, // shared + false, // is_wasm_memory + true, // free_on_destruct + false); // has_guard_regions + + TRACE_BS("BS:alloc bs=%p mem=%p (%zu bytes)\n", result, + result->buffer_start(), byte_length); + result->type_specific_data_.v8_api_array_buffer_allocator = allocator; + return std::unique_ptr(result); +} + +// Allocate a backing store for a Wasm memory. Always use the page allocator +// and add guard regions. +std::unique_ptr BackingStore::TryAllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared) { + bool guards = kUseGuardRegions; + + // For accounting purposes, whether a GC was necessary. + bool did_retry = false; + + // A helper to try running a function up to 3 times, executing a GC + // if the first and second attempts failed. + auto gc_retry = [&](const std::function& fn) { + for (int i = 0; i < 3; i++) { + if (fn()) return true; + // Collect garbage and retry. + did_retry = true; + // TODO(wasm): try Heap::EagerlyFreeExternalMemory() first? + isolate->heap()->MemoryPressureNotification( + MemoryPressureLevel::kCritical, true); + } + return false; + }; + + // Compute size of reserved memory. 
+ size_t reservation_size = 0; + size_t byte_capacity = 0; + + if (guards) { + reservation_size = static_cast<size_t>(kFullGuardSize); + byte_capacity = + static_cast<size_t>(wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize); + } else { + reservation_size = std::min(maximum_pages, wasm::kV8MaxWasmMemoryPages) * + wasm::kWasmPageSize; + byte_capacity = reservation_size; + } + + //-------------------------------------------------------------------------- + // 1. Enforce maximum address space reservation per engine. + //-------------------------------------------------------------------------- + auto reserve_memory_space = [&] { + return BackingStore::ReserveAddressSpace(reservation_size); + }; + + if (!gc_retry(reserve_memory_space)) { + // Crash on out-of-memory if the correctness fuzzer is running. + if (FLAG_correctness_fuzzer_suppressions) { + FATAL("could not allocate wasm memory backing store"); + } + RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure); + return {}; + } + + //-------------------------------------------------------------------------- + // 2. Allocate pages (inaccessible by default). + //-------------------------------------------------------------------------- + void* allocation_base = nullptr; + auto allocate_pages = [&] { + allocation_base = + AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size, + wasm::kWasmPageSize, PageAllocator::kNoAccess); + return allocation_base != nullptr; + }; + if (!gc_retry(allocate_pages)) { + // Page allocator could not reserve enough pages. + BackingStore::ReleaseReservation(reservation_size); + RecordStatus(isolate, AllocationStatus::kOtherFailure); + return {}; + } + + // Get a pointer to the start of the buffer, skipping negative guard region + // if necessary. + byte* buffer_start = reinterpret_cast<byte*>(allocation_base) + + (guards ? kNegativeGuardSize : 0); + + //-------------------------------------------------------------------------- + // 3. Commit the initial pages (allow read/write). + //-------------------------------------------------------------------------- + size_t byte_length = initial_pages * wasm::kWasmPageSize; + auto commit_memory = [&] { + return byte_length == 0 || + SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length, + PageAllocator::kReadWrite); + }; + if (!gc_retry(commit_memory)) { + // SetPermissions put us over the process memory limit. + V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()"); + } + + DebugCheckZero(buffer_start, byte_length); // touch the bytes. + + RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry + : AllocationStatus::kSuccess); + + auto result = new BackingStore(buffer_start, // start + byte_length, // length + byte_capacity, // capacity + shared, // shared + true, // is_wasm_memory + true, // free_on_destruct + guards); // has_guard_regions + + TRACE_BS("BSw:alloc bs=%p mem=%p (%zu bytes)\n", result, + result->buffer_start(), byte_length); + + // Shared Wasm memories need an anchor for the memory object list. + if (shared == SharedFlag::kShared) { + result->type_specific_data_.shared_wasm_memory_data = + new SharedWasmMemoryData(); + } + + return std::unique_ptr<BackingStore>(result); +} + +// Allocate a backing store for a Wasm memory. Always use the page allocator +// and add guard regions. +std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared) { + // Wasm pages must be a multiple of the allocation page size.
+ DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize()); + + // Enforce engine limitation on the maximum number of pages. + if (initial_pages > wasm::kV8MaxWasmMemoryPages) return nullptr; + + auto backing_store = + TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared); + if (!backing_store && maximum_pages > initial_pages) { + // If allocating the maximum failed, try allocating with the maximum set + // to the initial size. + backing_store = + TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared); + } + return backing_store; +} + +std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory( + Isolate* isolate, std::shared_ptr<BackingStore> old, + size_t new_byte_length) { + DCHECK_GE(new_byte_length, old->byte_length()); + // Note that we could allocate uninitialized to save initialization cost here, + // but since Wasm memories are allocated by the page allocator, the zeroing + // cost is already built-in. + // TODO(titzer): should we use a suitable maximum here? + auto new_backing_store = BackingStore::AllocateWasmMemory( + isolate, new_byte_length / wasm::kWasmPageSize, + new_byte_length / wasm::kWasmPageSize, + old->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared); + + if (!new_backing_store || + new_backing_store->has_guard_regions() != old->has_guard_regions()) { + return {}; + } + + size_t old_size = old->byte_length(); + if (old_size > 0) { + memcpy(new_backing_store->buffer_start(), old->buffer_start(), old_size); + } + + return new_backing_store; +} + +// Try to grow the size of a wasm memory in place, without realloc + copy. +bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate, + size_t new_byte_length) { + DCHECK(is_wasm_memory_); + DCHECK_EQ(0, new_byte_length % wasm::kWasmPageSize); + if (new_byte_length <= byte_length_) { + return true; // already big enough. + } + if (byte_capacity_ < new_byte_length) { + return false; // not enough capacity. + } + // Try to adjust the permissions on the committed region. + DCHECK_NOT_NULL(buffer_start_); + // If adjusting permissions fails, propagate the error back as a + // failure to grow. + if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_, + new_byte_length, PageAllocator::kReadWrite)) { + return false; + } + reinterpret_cast<v8::Isolate*>(isolate) + ->AdjustAmountOfExternalAllocatedMemory(new_byte_length - byte_length_); + byte_length_ = new_byte_length; + return true; +} + +void BackingStore::AttachSharedWasmMemoryObject( + Isolate* isolate, Handle<WasmMemoryObject> memory_object) { + DCHECK(is_wasm_memory_); + DCHECK(is_shared_); + // We need to take the global registry lock for this operation. + GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this, + memory_object); +} + +void BackingStore::BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store, + size_t new_size) { + // Requires the global registry lock. + GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow( + isolate, backing_store, new_size); +} + +void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) { + // Requires the global registry lock. + GlobalBackingStoreRegistry::Purge(isolate); +} + +void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) { + // Requires the global registry lock.
+ GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate); +} + +std::unique_ptr<BackingStore> BackingStore::WrapAllocation( + Isolate* isolate, void* allocation_base, size_t allocation_length, + SharedFlag shared, bool free_on_destruct) { + auto result = + new BackingStore(allocation_base, allocation_length, allocation_length, + shared, false, free_on_destruct, false); + result->type_specific_data_.v8_api_array_buffer_allocator = + isolate->array_buffer_allocator(); + TRACE_BS("BS:wrap bs=%p mem=%p (%zu bytes)\n", result, + result->buffer_start(), result->byte_length()); + return std::unique_ptr<BackingStore>(result); +} + +void* BackingStore::get_v8_api_array_buffer_allocator() { + CHECK(!is_wasm_memory_); + auto array_buffer_allocator = + type_specific_data_.v8_api_array_buffer_allocator; + CHECK_NOT_NULL(array_buffer_allocator); + return array_buffer_allocator; +} + +SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() { + CHECK(is_wasm_memory_ && is_shared_); + auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data; + CHECK(shared_wasm_memory_data); + return shared_wasm_memory_data; +} + +namespace { +// Implementation details of GlobalBackingStoreRegistry. +struct GlobalBackingStoreRegistryImpl { + GlobalBackingStoreRegistryImpl() {} + base::Mutex mutex_; + std::unordered_map<void*, std::weak_ptr<BackingStore>> map_; +}; +base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ = + LAZY_INSTANCE_INITIALIZER; +inline GlobalBackingStoreRegistryImpl* impl() { + return global_registry_impl_.Pointer(); +} +} // namespace + +void GlobalBackingStoreRegistry::Register( + std::shared_ptr<BackingStore> backing_store) { + if (!backing_store) return; + + base::MutexGuard scope_lock(&impl()->mutex_); + if (backing_store->globally_registered_) return; + TRACE_BS("BS:reg bs=%p mem=%p (%zu bytes)\n", backing_store.get(), + backing_store->buffer_start(), backing_store->byte_length()); + std::weak_ptr<BackingStore> weak = backing_store; + auto result = impl()->map_.insert({backing_store->buffer_start(), weak}); + CHECK(result.second); + backing_store->globally_registered_ = true; +} + +void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) { + if (!backing_store->globally_registered_) return; + + base::MutexGuard scope_lock(&impl()->mutex_); + const auto& result = impl()->map_.find(backing_store->buffer_start()); + if (result != impl()->map_.end()) { + auto shared = result->second.lock(); + if (shared) { + DCHECK_EQ(backing_store, shared.get()); + } + impl()->map_.erase(result); + } + backing_store->globally_registered_ = false; +} + +std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup( + void* buffer_start, size_t length) { + base::MutexGuard scope_lock(&impl()->mutex_); + TRACE_BS("BS:lookup mem=%p (%zu bytes)\n", buffer_start, length); + const auto& result = impl()->map_.find(buffer_start); + if (result == impl()->map_.end()) { + return std::shared_ptr<BackingStore>(); + } + auto backing_store = result->second.lock(); + DCHECK_EQ(buffer_start, backing_store->buffer_start()); + DCHECK_EQ(length, backing_store->byte_length()); + return backing_store; +} + +void GlobalBackingStoreRegistry::Purge(Isolate* isolate) { + base::MutexGuard scope_lock(&impl()->mutex_); + // Purge all entries in the map that refer to the given isolate.
+ for (auto& entry : impl()->map_) { + auto backing_store = entry.second.lock(); + if (!backing_store) continue; // skip entries where weak ptr is null + if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + // Remove this isolate from the isolates list. + auto& isolates = shared_data->isolates_; + for (size_t i = 0; i < isolates.size(); i++) { + if (isolates[i] == isolate) isolates[i] = nullptr; + } + } +} + +void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject( + Isolate* isolate, BackingStore* backing_store, + Handle<WasmMemoryObject> memory_object) { + // Add to the weak array list of shared memory objects in the isolate. + isolate->AddSharedWasmMemory(memory_object); + + // Add the isolate to the list of isolates sharing this backing store. + base::MutexGuard scope_lock(&impl()->mutex_); + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + auto& isolates = shared_data->isolates_; + int free_entry = -1; + for (size_t i = 0; i < isolates.size(); i++) { + if (isolates[i] == isolate) return; + if (isolates[i] == nullptr) free_entry = static_cast<int>(i); + } + if (free_entry >= 0) + isolates[free_entry] = isolate; + else + isolates.push_back(isolate); +} + +void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr<BackingStore> backing_store, + size_t new_size) { + { + // The global lock protects the list of isolates per backing store. + base::MutexGuard scope_lock(&impl()->mutex_); + SharedWasmMemoryData* shared_data = + backing_store->get_shared_wasm_memory_data(); + for (Isolate* other : shared_data->isolates_) { + if (other && other != isolate) { + other->stack_guard()->RequestGrowSharedMemory(); + } + } + } + // Update memory objects in this isolate. + UpdateSharedWasmMemoryObjects(isolate); +} + +void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects( + Isolate* isolate) { + HandleScope scope(isolate); + Handle<WeakArrayList> shared_wasm_memories = + isolate->factory()->shared_wasm_memories(); + + for (int i = 0; i < shared_wasm_memories->length(); i++) { + HeapObject obj; + if (!shared_wasm_memories->Get(i).GetHeapObject(&obj)) continue; + + Handle<WasmMemoryObject> memory_object(WasmMemoryObject::cast(obj), + isolate); + Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); + std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore(); + + if (old_buffer->byte_length() != backing_store->byte_length()) { + Handle<JSArrayBuffer> new_buffer = + isolate->factory()->NewJSSharedArrayBuffer(); + new_buffer->Attach(backing_store); + memory_object->update_instances(isolate, new_buffer); + } + } +} + +} // namespace internal +} // namespace v8 + +#undef TRACE_BS diff --git a/src/objects/backing-store.h b/src/objects/backing-store.h new file mode 100644 index 0000000000..0ba8c4448d --- /dev/null +++ b/src/objects/backing-store.h @@ -0,0 +1,203 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_BACKING_STORE_H_ +#define V8_OBJECTS_BACKING_STORE_H_ + +#include "src/handles/handles.h" + +namespace v8 { +namespace internal { + +class Isolate; +class WasmMemoryObject; + +// Whether the backing store is shared or not. +enum class SharedFlag : uint8_t { kNotShared, kShared }; + +// Whether the backing store memory is initialized to zero or not.
+enum class InitializedFlag : uint8_t { kUninitialized, kZeroInitialized }; + +// Internal information for shared wasm memories. E.g. contains +// a list of all memory objects (across all isolates) that share this +// backing store. +struct SharedWasmMemoryData; + +// The {BackingStore} data structure stores all the low-level details about the +// backing store of an array buffer or Wasm memory, including its base address +// and length, whether it is shared, provided by the embedder, has guard +// regions, etc. Instances of this class *own* the underlying memory +// when they are created through one of the {Allocate()} methods below, +// and the destructor frees the memory (and page allocation if necessary). +// Backing stores can also *wrap* embedder-allocated memory. In this case, +// they do not own the memory, and upon destruction, they do not deallocate it. +class V8_EXPORT_PRIVATE BackingStore { + public: + ~BackingStore(); + + // Allocate an array buffer backing store using the default method, + // which currently is the embedder-provided array buffer allocator. + static std::unique_ptr<BackingStore> Allocate(Isolate* isolate, + size_t byte_length, + SharedFlag shared, + InitializedFlag initialized); + + // Allocate the backing store for a Wasm memory. + static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate, + size_t initial_pages, + size_t maximum_pages, + SharedFlag shared); + + // Allocate a new, larger, backing store for a Wasm memory and copy the + // contents of this backing store into it. + static std::unique_ptr<BackingStore> CopyWasmMemory( + Isolate* isolate, std::shared_ptr<BackingStore> old, + size_t new_byte_length); + + // Reallocate the backing store for a Wasm memory. Either readjust the + // size of the given backing store or allocate a new one and copy. + static std::unique_ptr<BackingStore> ReallocateWasmMemory( + std::unique_ptr<BackingStore> existing, size_t new_byte_length); + + // Create a backing store that wraps existing allocated memory. + // If {free_on_destruct} is {true}, the memory will be freed using the + // ArrayBufferAllocator::Free() callback when this backing store is + // destructed. Otherwise destructing the backing store will do nothing + // to the allocated memory. + static std::unique_ptr<BackingStore> WrapAllocation(Isolate* isolate, + void* allocation_base, + size_t allocation_length, + SharedFlag shared, + bool free_on_destruct); + + // Accessors. + void* buffer_start() const { return buffer_start_; } + size_t byte_length() const { return byte_length_; } + size_t byte_capacity() const { return byte_capacity_; } + bool is_shared() const { return is_shared_; } + bool is_wasm_memory() const { return is_wasm_memory_; } + bool has_guard_regions() const { return has_guard_regions_; } + bool free_on_destruct() const { return free_on_destruct_; } + + // Attempt to grow this backing store in place. + bool GrowWasmMemoryInPlace(Isolate* isolate, size_t new_byte_length); + + // Attach the given memory object to this backing store. The memory object + // will be updated if this backing store is grown. + void AttachSharedWasmMemoryObject(Isolate* isolate, + Handle<WasmMemoryObject> memory_object); + + // Send asynchronous updates to attached memory objects in other isolates + // after the backing store has been grown. Memory objects in this + // isolate are updated synchronously. + static void BroadcastSharedWasmMemoryGrow(Isolate* isolate, + std::shared_ptr<BackingStore>, + size_t new_size); + + // TODO(wasm): address space limitations should be enforced in page alloc.
+ // These methods enforce a limit on the total amount of address space, + // which is used for both backing stores and wasm memory. + static bool ReserveAddressSpace(uint64_t num_bytes); + static void ReleaseReservation(uint64_t num_bytes); + + // Remove all memory objects in the given isolate that refer to this + // backing store. + static void RemoveSharedWasmMemoryObjects(Isolate* isolate); + + // Update all shared memory objects in this isolate (after a grow operation). + static void UpdateSharedWasmMemoryObjects(Isolate* isolate); + + private: + friend class GlobalBackingStoreRegistry; + + BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity, + SharedFlag shared, bool is_wasm_memory, bool free_on_destruct, + bool has_guard_regions) + : buffer_start_(buffer_start), + byte_length_(byte_length), + byte_capacity_(byte_capacity), + is_shared_(shared == SharedFlag::kShared), + is_wasm_memory_(is_wasm_memory), + free_on_destruct_(free_on_destruct), + has_guard_regions_(has_guard_regions), + globally_registered_(false) { + type_specific_data_.v8_api_array_buffer_allocator = nullptr; + } + + void* buffer_start_ = nullptr; + size_t byte_length_ = 0; + size_t byte_capacity_ = 0; + bool is_shared_ : 1; + bool is_wasm_memory_ : 1; + bool free_on_destruct_ : 1; + bool has_guard_regions_ : 1; + bool globally_registered_ : 1; + + union { + // If this backing store was allocated through the ArrayBufferAllocator API, + // this is a direct pointer to the API object for freeing the backing + // store. + // Note: we use {void*} here because we cannot forward-declare an inner + // class from the API. + void* v8_api_array_buffer_allocator; + + // For shared Wasm memories, this is a list of all the attached memory + // objects, which is needed to grow shared backing stores. + SharedWasmMemoryData* shared_wasm_memory_data; + } type_specific_data_; + + // Accessors for type-specific data. + void* get_v8_api_array_buffer_allocator(); + SharedWasmMemoryData* get_shared_wasm_memory_data(); + + void Clear(); // Internally clears fields after deallocation. + static std::unique_ptr<BackingStore> TryAllocateWasmMemory( + Isolate* isolate, size_t initial_pages, size_t maximum_pages, + SharedFlag shared); + + DISALLOW_COPY_AND_ASSIGN(BackingStore); +}; + +// A global, per-process mapping from buffer addresses to backing stores. +// This is generally only used for dealing with an embedder that has not +// migrated to the new API which should use proper pointers to manage +// backing stores. +class GlobalBackingStoreRegistry { + public: + // Register a backing store in the global registry. A mapping from the + // {buffer_start} to the backing store object will be added. The backing + // store will automatically unregister itself upon destruction. + static void Register(std::shared_ptr<BackingStore> backing_store); + + // Look up a backing store based on the {buffer_start} pointer. + static std::shared_ptr<BackingStore> Lookup(void* buffer_start, + size_t length); + + private: + friend class BackingStore; + // Unregister a backing store in the global registry. + static void Unregister(BackingStore* backing_store); + + // Adds the given memory object to the backing store's weak list + // of memory objects (under the registry lock). + static void AddSharedWasmMemoryObject(Isolate* isolate, + BackingStore* backing_store, + Handle<WasmMemoryObject> memory_object); + + // Purge any shared wasm memory lists that refer to this isolate. + static void Purge(Isolate* isolate); + + // Broadcast updates to all attached memory objects.
+ static void BroadcastSharedWasmMemoryGrow( + Isolate* isolate, std::shared_ptr backing_store, + size_t new_size); + + // Update all shared memory objects in the given isolate. + static void UpdateSharedWasmMemoryObjects(Isolate* isolate); +}; + +} // namespace internal +} // namespace v8 + +#endif // V8_OBJECTS_BACKING_STORE_H_ diff --git a/src/objects/js-array-buffer-inl.h b/src/objects/js-array-buffer-inl.h index 061fec10f7..6b5399677e 100644 --- a/src/objects/js-array-buffer-inl.h +++ b/src/objects/js-array-buffer-inl.h @@ -48,14 +48,6 @@ size_t JSArrayBuffer::allocation_length() const { if (backing_store() == nullptr) { return 0; } - // If this buffer is managed by the WasmMemoryTracker - if (is_wasm_memory()) { - const auto* data = - GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData( - backing_store()); - DCHECK_NOT_NULL(data); - return data->allocation_length; - } return byte_length(); } @@ -63,25 +55,9 @@ void* JSArrayBuffer::allocation_base() const { if (backing_store() == nullptr) { return nullptr; } - // If this buffer is managed by the WasmMemoryTracker - if (is_wasm_memory()) { - const auto* data = - GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData( - backing_store()); - DCHECK_NOT_NULL(data); - return data->allocation_base; - } return backing_store(); } -bool JSArrayBuffer::is_wasm_memory() const { - return IsWasmMemoryBit::decode(bit_field()); -} - -void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) { - set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory)); -} - void JSArrayBuffer::clear_padding() { if (FIELD_SIZE(kOptionalPaddingOffset) != 0) { DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset)); @@ -105,6 +81,8 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_detachable, JSArrayBuffer::IsDetachableBit) BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_detached, JSArrayBuffer::WasDetachedBit) +BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory, + JSArrayBuffer::IsAsmJsMemoryBit) BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared, JSArrayBuffer::IsSharedBit) diff --git a/src/objects/js-array-buffer.cc b/src/objects/js-array-buffer.cc index a506920f95..be1773ecfd 100644 --- a/src/objects/js-array-buffer.cc +++ b/src/objects/js-array-buffer.cc @@ -31,167 +31,103 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle s, *index = result; return true; } - -inline int ConvertToMb(size_t size) { - return static_cast(size / static_cast(MB)); -} - } // anonymous namespace -void JSArrayBuffer::Detach() { - CHECK(is_detachable()); - CHECK(!was_detached()); - CHECK(is_external()); +void JSArrayBuffer::SetupEmpty(SharedFlag shared) { + clear_padding(); + set_bit_field(0); + set_is_shared(shared == SharedFlag::kShared); + set_is_detachable(shared != SharedFlag::kShared); set_backing_store(nullptr); set_byte_length(0); - set_was_detached(true); - set_is_detachable(false); - // Invalidate the detaching protector. +} + +std::shared_ptr JSArrayBuffer::Detach( + bool force_for_wasm_memory) { + if (was_detached()) return nullptr; + + if (force_for_wasm_memory) { + // Skip the is_detachable() check. + } else if (!is_detachable()) { + // Not detachable, do nothing. 
+ return nullptr; + } + Isolate* const isolate = GetIsolate(); + auto backing_store = isolate->heap()->UnregisterBackingStore(*this); + CHECK_IMPLIES(force_for_wasm_memory && backing_store, + backing_store->is_wasm_memory()); + if (isolate->IsArrayBufferDetachingIntact()) { isolate->InvalidateArrayBufferDetachingProtector(); } -} -void JSArrayBuffer::FreeBackingStoreFromMainThread() { - if (allocation_base() == nullptr) { - return; - } - FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(), - backing_store(), is_wasm_memory()}); - // Zero out the backing store and allocation base to avoid dangling - // pointers. + DCHECK(!is_shared()); + DCHECK(!is_asmjs_memory()); set_backing_store(nullptr); + set_byte_length(0); + set_was_detached(true); + + return backing_store; } -// static -void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) { - if (allocation.is_wasm_memory) { - wasm::WasmMemoryTracker* memory_tracker = - isolate->wasm_engine()->memory_tracker(); - memory_tracker->FreeWasmMemory(isolate, allocation.backing_store); - } else { - isolate->array_buffer_allocator()->Free(allocation.allocation_base, - allocation.length); - } +void JSArrayBuffer::Attach(std::shared_ptr backing_store) { + SetupEmpty(backing_store->is_shared() ? SharedFlag::kShared + : SharedFlag::kNotShared); + + if (backing_store->is_wasm_memory()) set_is_detachable(false); + + set_backing_store(backing_store->buffer_start()); + set_byte_length(backing_store->byte_length()); + if (!backing_store->free_on_destruct()) set_is_external(true); + + GetIsolate()->heap()->RegisterBackingStore(*this, std::move(backing_store)); } -void JSArrayBuffer::Setup(Handle array_buffer, Isolate* isolate, - bool is_external, void* data, size_t byte_length, - SharedFlag shared_flag, bool is_wasm_memory) { - DCHECK_EQ(array_buffer->GetEmbedderFieldCount(), - v8::ArrayBuffer::kEmbedderFieldCount); - DCHECK_LE(byte_length, JSArrayBuffer::kMaxByteLength); - for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) { - array_buffer->SetEmbedderField(i, Smi::kZero); - } - array_buffer->set_byte_length(byte_length); - array_buffer->set_bit_field(0); - array_buffer->clear_padding(); - array_buffer->set_is_external(is_external); - array_buffer->set_is_detachable(shared_flag == SharedFlag::kNotShared); - array_buffer->set_is_shared(shared_flag == SharedFlag::kShared); - array_buffer->set_is_wasm_memory(is_wasm_memory); - // Initialize backing store at last to avoid handling of |JSArrayBuffers| that - // are currently being constructed in the |ArrayBufferTracker|. The - // registration method below handles the case of registering a buffer that has - // already been promoted. 
- array_buffer->set_backing_store(data); - - if (data && !is_external) { - isolate->heap()->RegisterNewArrayBuffer(*array_buffer); - } -} - -void JSArrayBuffer::SetupAsEmpty(Handle array_buffer, - Isolate* isolate) { - Setup(array_buffer, isolate, false, nullptr, 0, SharedFlag::kNotShared); -} - -bool JSArrayBuffer::SetupAllocatingData(Handle array_buffer, - Isolate* isolate, - size_t allocated_length, - bool initialize, - SharedFlag shared_flag) { - void* data; - CHECK_NOT_NULL(isolate->array_buffer_allocator()); - if (allocated_length != 0) { - if (allocated_length >= MB) - isolate->counters()->array_buffer_big_allocations()->AddSample( - ConvertToMb(allocated_length)); - if (shared_flag == SharedFlag::kShared) - isolate->counters()->shared_array_allocations()->AddSample( - ConvertToMb(allocated_length)); - if (initialize) { - data = isolate->array_buffer_allocator()->Allocate(allocated_length); - } else { - data = isolate->array_buffer_allocator()->AllocateUninitialized( - allocated_length); - } - if (data == nullptr) { - isolate->counters()->array_buffer_new_size_failures()->AddSample( - ConvertToMb(allocated_length)); - SetupAsEmpty(array_buffer, isolate); - return false; - } - } else { - data = nullptr; - } - - const bool is_external = false; - JSArrayBuffer::Setup(array_buffer, isolate, is_external, data, - allocated_length, shared_flag); - return true; -} - -Handle JSTypedArray::MaterializeArrayBuffer( - Handle typed_array) { - DCHECK(typed_array->is_on_heap()); - - Isolate* isolate = typed_array->GetIsolate(); - - DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind())); - - Handle buffer(JSArrayBuffer::cast(typed_array->buffer()), - isolate); - // This code does not know how to materialize from wasm buffers. - DCHECK(!buffer->is_wasm_memory()); - - void* backing_store = - isolate->array_buffer_allocator()->AllocateUninitialized( - typed_array->byte_length()); - if (backing_store == nullptr) { - isolate->heap()->FatalProcessOutOfMemory( - "JSTypedArray::MaterializeArrayBuffer"); - } - buffer->set_is_external(false); - DCHECK_EQ(buffer->byte_length(), typed_array->byte_length()); - // Initialize backing store at last to avoid handling of |JSArrayBuffers| that - // are currently being constructed in the |ArrayBufferTracker|. The - // registration method below handles the case of registering a buffer that has - // already been promoted. - buffer->set_backing_store(backing_store); - // RegisterNewArrayBuffer expects a valid length for adjusting counters. - isolate->heap()->RegisterNewArrayBuffer(*buffer); - memcpy(buffer->backing_store(), typed_array->DataPtr(), - typed_array->byte_length()); - - typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array()); - typed_array->set_external_pointer(backing_store); - typed_array->set_base_pointer(Smi::kZero); - DCHECK(!typed_array->is_on_heap()); - - return buffer; +std::shared_ptr JSArrayBuffer::GetBackingStore() { + return GetIsolate()->heap()->LookupBackingStore(*this); } Handle JSTypedArray::GetBuffer() { + Isolate* isolate = GetIsolate(); + Handle self(*this, isolate); + DCHECK(IsTypedArrayElementsKind(self->GetElementsKind())); + + Handle array_buffer(JSArrayBuffer::cast(self->buffer()), + isolate); if (!is_on_heap()) { - Handle array_buffer(JSArrayBuffer::cast(buffer()), - GetIsolate()); + // Already is off heap, so return the existing buffer. return array_buffer; } - Handle self(*this, GetIsolate()); - return MaterializeArrayBuffer(self); + + // The existing array buffer should be empty. 
+ DCHECK_NULL(array_buffer->backing_store()); + + // Allocate a new backing store and attach it to the existing array buffer. + size_t byte_length = self->byte_length(); + auto backing_store = + BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared, + InitializedFlag::kUninitialized); + + if (!backing_store) { + isolate->heap()->FatalProcessOutOfMemory("JSTypedArray::GetBuffer"); + } + + // Copy the elements into the backing store of the array buffer. + if (byte_length > 0) { + memcpy(backing_store->buffer_start(), self->DataPtr(), byte_length); + } + + // Attach the backing store to the array buffer. + array_buffer->Attach(std::move(backing_store)); + + // Clear the elements of the typed array. + self->set_elements(ReadOnlyRoots(isolate).empty_byte_array()); + self->set_external_pointer(array_buffer->backing_store()); + self->set_base_pointer(Smi::kZero); + DCHECK(!self->is_on_heap()); + + return array_buffer; } // ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc diff --git a/src/objects/js-array-buffer.h b/src/objects/js-array-buffer.h index 7bf2e1ae94..4675d5f048 100644 --- a/src/objects/js-array-buffer.h +++ b/src/objects/js-array-buffer.h @@ -5,6 +5,7 @@ #ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_ #define V8_OBJECTS_JS_ARRAY_BUFFER_H_ +#include "src/objects/backing-store.h" #include "src/objects/js-objects.h" // Has to be the last include (doesn't have include guards): @@ -13,9 +14,6 @@ namespace v8 { namespace internal { -// Whether a JSArrayBuffer is a SharedArrayBuffer or not. -enum class SharedFlag : uint32_t { kNotShared, kShared }; - class JSArrayBuffer : public JSObject { public: // The maximum length for JSArrayBuffer's supported by V8. @@ -51,8 +49,8 @@ class JSArrayBuffer : public JSObject { V(IsExternalBit, bool, 1, _) \ V(IsDetachableBit, bool, 1, _) \ V(WasDetachedBit, bool, 1, _) \ - V(IsSharedBit, bool, 1, _) \ - V(IsWasmMemoryBit, bool, 1, _) + V(IsAsmJsMemoryBit, bool, 1, _) \ + V(IsSharedBit, bool, 1, _) DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS) #undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS @@ -61,57 +59,49 @@ class JSArrayBuffer : public JSObject { // memory block once all ArrayBuffers referencing it are collected by the GC. DECL_BOOLEAN_ACCESSORS(is_external) - // [is_detachable]: false indicates that this buffer cannot be detached. + // [is_detachable]: false => this buffer cannot be detached. DECL_BOOLEAN_ACCESSORS(is_detachable) - // [was_detached]: true if the buffer was previously detached. + // [was_detached]: true => the buffer was previously detached. DECL_BOOLEAN_ACCESSORS(was_detached) + // [is_asmjs_memory]: true => this buffer was once used as asm.js memory. + DECL_BOOLEAN_ACCESSORS(is_asmjs_memory) + // [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer. DECL_BOOLEAN_ACCESSORS(is_shared) - // [is_wasm_memory]: whether the buffer is tracked by the WasmMemoryTracker. - DECL_BOOLEAN_ACCESSORS(is_wasm_memory) - DECL_CAST(JSArrayBuffer) - void Detach(); + // Immediately after creating an array buffer, the internal untagged fields + // are garbage. They need to be initialized with either {SetupEmpty()} or + // have a backing store attached via {Attach()}. - struct Allocation { - Allocation(void* allocation_base, size_t length, void* backing_store, - bool is_wasm_memory) - : allocation_base(allocation_base), - length(length), - backing_store(backing_store), - is_wasm_memory(is_wasm_memory) {} + // Setup an array buffer with no backing store. 
+ V8_EXPORT_PRIVATE void SetupEmpty(SharedFlag shared); - void* allocation_base; - size_t length; - void* backing_store; - bool is_wasm_memory; - }; + // Attach a backing store to this array buffer. + // (note: this registers it with src/heap/array-buffer-tracker.h) + V8_EXPORT_PRIVATE void Attach(std::shared_ptr backing_store); - V8_EXPORT_PRIVATE void FreeBackingStoreFromMainThread(); - V8_EXPORT_PRIVATE static void FreeBackingStore(Isolate* isolate, - Allocation allocation); + // Detach the backing store from this array buffer if it is detachable + // and return a reference to the backing store object. This sets the + // internal pointer and length to 0 and unregisters the backing store + // from the array buffer tracker. + // If the array buffer is not detachable, this is a nop. + // + // Array buffers that wrap wasm memory objects are special in that they + // are normally not detachable, but can become detached as a side effect + // of growing the underlying memory object. The {force_for_wasm_memory} flag + // is used by the implementation of Wasm memory growth in order to bypass the + // non-detachable check. + V8_EXPORT_PRIVATE std::shared_ptr Detach( + bool force_for_wasm_memory = false); - V8_EXPORT_PRIVATE static void Setup( - Handle array_buffer, Isolate* isolate, bool is_external, - void* data, size_t allocated_length, - SharedFlag shared_flag = SharedFlag::kNotShared, - bool is_wasm_memory = false); - - // Initialize the object as empty one to avoid confusing heap verifier if - // the failure happened in the middle of JSArrayBuffer construction. - V8_EXPORT_PRIVATE static void SetupAsEmpty(Handle array_buffer, - Isolate* isolate); - - // Returns false if array buffer contents could not be allocated. - // In this case, |array_buffer| will not be set up. - V8_EXPORT_PRIVATE static bool SetupAllocatingData( - Handle array_buffer, Isolate* isolate, - size_t allocated_length, bool initialize = true, - SharedFlag shared_flag = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT; + // Get a reference to backing store of this array buffer, if there is a + // backing store. Returns nullptr if there is no backing store (e.g. detached + // or a zero-length array buffer). + std::shared_ptr GetBackingStore(); // Dispatched behavior. 
DECL_PRINTER(JSArrayBuffer) @@ -250,9 +240,6 @@ class JSTypedArray : public JSArrayBufferView { #endif private: - static Handle MaterializeArrayBuffer( - Handle typed_array); - OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView); }; diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index 5a72dd6532..01fa5c67ed 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -1033,8 +1033,8 @@ Maybe ValueSerializer::WriteWasmMemory(Handle object) { return Nothing(); } - isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared( - object, isolate_); + GlobalBackingStoreRegistry::Register( + object->array_buffer().GetBackingStore()); WriteTag(SerializationTag::kWasmMemoryTransfer); WriteZigZag(object->maximum_pages()); @@ -1809,13 +1809,12 @@ MaybeHandle ValueDeserializer::ReadJSArrayBuffer( byte_length > static_cast(end_ - position_)) { return MaybeHandle(); } - const bool should_initialize = false; - Handle array_buffer = isolate_->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, allocation_); - if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length, - should_initialize)) { - return MaybeHandle(); - } + MaybeHandle result = + isolate_->factory()->NewJSArrayBufferAndBackingStore( + byte_length, InitializedFlag::kUninitialized, allocation_); + Handle array_buffer; + if (!result.ToHandle(&array_buffer)) return result; + if (byte_length > 0) { memcpy(array_buffer->backing_store(), position_, byte_length); } @@ -2057,9 +2056,6 @@ MaybeHandle ValueDeserializer::ReadWasmMemory() { Handle result = WasmMemoryObject::New(isolate_, buffer, maximum_pages); - isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared( - result, isolate_); - AddObjectWithID(id, result); return result; } diff --git a/src/roots/roots.h b/src/roots/roots.h index e6bcd94c01..b794c0a104 100644 --- a/src/roots/roots.h +++ b/src/roots/roots.h @@ -266,7 +266,9 @@ class Symbol; V(HeapObject, weak_refs_keep_during_job, WeakRefsKeepDuringJob) \ V(HeapObject, interpreter_entry_trampoline_for_profiling, \ InterpreterEntryTrampolineForProfiling) \ - V(Object, pending_optimize_for_test_bytecode, PendingOptimizeForTestBytecode) + V(Object, pending_optimize_for_test_bytecode, \ + PendingOptimizeForTestBytecode) \ + V(WeakArrayList, shared_wasm_memories, SharedWasmMemories) // Entries in this list are limited to Smis and are not visited during GC. 
#define SMI_ROOT_LIST(V) \ diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc index f0caaaa14c..1763a0fb9e 100644 --- a/src/runtime/runtime-test.cc +++ b/src/runtime/runtime-test.cc @@ -1088,17 +1088,22 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) { wasm::NativeModule* native_module = module_obj->native_module(); wasm::WasmSerializer wasm_serializer(native_module); - size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize(); - void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size); - Handle array_buffer = - isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size); - if (!array_data || - !wasm_serializer.SerializeNativeModule( - {reinterpret_cast(array_data), compiled_size})) { - return ReadOnlyRoots(isolate).undefined_value(); + size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize(); + + MaybeHandle result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + byte_length, InitializedFlag::kUninitialized); + + Handle array_buffer; + if (result.ToHandle(&array_buffer) && + wasm_serializer.SerializeNativeModule( + {reinterpret_cast(array_buffer->backing_store()), + byte_length})) { + return *array_buffer; } - return *array_buffer; + + // Error. Return undefined. + return ReadOnlyRoots(isolate).undefined_value(); } // Take an array buffer and attempt to reconstruct a compiled wasm module. diff --git a/src/runtime/runtime-typedarray.cc b/src/runtime/runtime-typedarray.cc index 7fab051cbf..327c102238 100644 --- a/src/runtime/runtime-typedarray.cc +++ b/src/runtime/runtime-typedarray.cc @@ -27,22 +27,7 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) { isolate, NewTypeError(MessageTemplate::kNotTypedArray)); } Handle array_buffer = Handle::cast(argument); - if (!array_buffer->is_detachable()) { - return ReadOnlyRoots(isolate).undefined_value(); - } - if (array_buffer->backing_store() == nullptr) { - CHECK_EQ(0, array_buffer->byte_length()); - return ReadOnlyRoots(isolate).undefined_value(); - } - // Shared array buffers should never be detached. - CHECK(!array_buffer->is_shared()); - DCHECK(!array_buffer->is_external()); - void* backing_store = array_buffer->backing_store(); - size_t byte_length = array_buffer->byte_length(); - array_buffer->set_is_external(true); - isolate->heap()->UnregisterArrayBuffer(*array_buffer); array_buffer->Detach(); - isolate->array_buffer_allocator()->Free(backing_store, byte_length); return ReadOnlyRoots(isolate).undefined_value(); } diff --git a/src/snapshot/deserializer.cc b/src/snapshot/deserializer.cc index 25e32e2cc0..98a453b0da 100644 --- a/src/snapshot/deserializer.cc +++ b/src/snapshot/deserializer.cc @@ -295,19 +295,23 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj, if (!typed_array.is_on_heap()) { Smi store_index( reinterpret_cast
(typed_array.external_pointer())); - byte* backing_store = off_heap_backing_stores_[store_index.value()] + - typed_array.byte_offset(); - typed_array.set_external_pointer(backing_store); + auto backing_store = backing_stores_[store_index.value()]; + auto start = backing_store + ? reinterpret_cast(backing_store->buffer_start()) + : nullptr; + typed_array.set_external_pointer(start + typed_array.byte_offset()); } } else if (obj.IsJSArrayBuffer()) { JSArrayBuffer buffer = JSArrayBuffer::cast(obj); // Only fixup for the off-heap case. if (buffer.backing_store() != nullptr) { Smi store_index(reinterpret_cast
(buffer.backing_store())); - void* backing_store = off_heap_backing_stores_[store_index.value()]; - - buffer.set_backing_store(backing_store); - isolate_->heap()->RegisterNewArrayBuffer(buffer); + auto backing_store = backing_stores_[store_index.value()]; + if (backing_store) { + buffer.Attach(backing_store); + } else { + buffer.SetupEmpty(SharedFlag::kNotShared); + } } } else if (obj.IsBytecodeArray()) { // TODO(mythria): Remove these once we store the default values for these @@ -669,12 +673,12 @@ bool Deserializer::ReadData(TSlot current, TSlot limit, case kOffHeapBackingStore: { int byte_length = source_.GetInt(); - byte* backing_store = static_cast( - isolate->array_buffer_allocator()->AllocateUninitialized( - byte_length)); + std::unique_ptr backing_store = + BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared, + InitializedFlag::kUninitialized); CHECK_NOT_NULL(backing_store); - source_.CopyRaw(backing_store, byte_length); - off_heap_backing_stores_.push_back(backing_store); + source_.CopyRaw(backing_store->buffer_start(), byte_length); + backing_stores_.push_back(std::move(backing_store)); break; } diff --git a/src/snapshot/deserializer.h b/src/snapshot/deserializer.h index 8dce1b3f3f..9f66c37ac5 100644 --- a/src/snapshot/deserializer.h +++ b/src/snapshot/deserializer.h @@ -10,6 +10,7 @@ #include "src/objects/allocation-site.h" #include "src/objects/api-callbacks.h" +#include "src/objects/backing-store.h" #include "src/objects/code.h" #include "src/objects/js-array.h" #include "src/objects/map.h" @@ -56,7 +57,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { allocator()->DecodeReservation(data->Reservations()); // We start the indices here at 1, so that we can distinguish between an // actual index and a nullptr in a deserialized object requiring fix-up. - off_heap_backing_stores_.push_back(nullptr); + backing_stores_.push_back({}); } void Initialize(Isolate* isolate); @@ -173,7 +174,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer { std::vector call_handler_infos_; std::vector> new_internalized_strings_; std::vector> new_scripts_; - std::vector off_heap_backing_stores_; + std::vector> backing_stores_; DeserializerAllocator allocator_; const bool deserializing_user_code_; diff --git a/src/wasm/c-api.cc b/src/wasm/c-api.cc index 86bba189b8..32bf4956b3 100644 --- a/src/wasm/c-api.cc +++ b/src/wasm/c-api.cc @@ -1734,9 +1734,10 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own { if (maximum < minimum) return nullptr; if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr; } - bool is_shared = false; // TODO(wasm+): Support shared memory. + // TODO(wasm+): Support shared memory. 
+ i::SharedFlag shared = i::SharedFlag::kNotShared; i::Handle memory_obj; - if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared) + if (!i::WasmMemoryObject::New(isolate, minimum, maximum, shared) .ToHandle(&memory_obj)) { return own(); } diff --git a/src/wasm/module-compiler.cc b/src/wasm/module-compiler.cc index f15d90744f..fe12d66397 100644 --- a/src/wasm/module-compiler.cc +++ b/src/wasm/module-compiler.cc @@ -31,7 +31,6 @@ #include "src/wasm/wasm-import-wrapper-cache.h" #include "src/wasm/wasm-js.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-opcodes.h" #include "src/wasm/wasm-result.h" diff --git a/src/wasm/module-instantiate.cc b/src/wasm/module-instantiate.cc index 7dd05a16ca..6baa27b3bb 100644 --- a/src/wasm/module-instantiate.cc +++ b/src/wasm/module-instantiate.cc @@ -93,7 +93,7 @@ class InstanceBuilder { InstanceBuilder(Isolate* isolate, ErrorThrower* thrower, Handle module_object, MaybeHandle ffi, - MaybeHandle memory); + MaybeHandle memory_buffer); // Build an instance, in all of its glory. MaybeHandle Build(); @@ -114,7 +114,8 @@ class InstanceBuilder { ErrorThrower* thrower_; Handle module_object_; MaybeHandle ffi_; - MaybeHandle memory_; + MaybeHandle memory_buffer_; + Handle memory_object_; Handle untagged_globals_; Handle tagged_globals_; std::vector> exception_wrappers_; @@ -165,9 +166,11 @@ class InstanceBuilder { void SanitizeImports(); - // Find the imported memory buffer if there is one. This is used to see if we - // need to recompile with bounds checks before creating the instance. - MaybeHandle FindImportedMemoryBuffer() const; + // Find the imported memory if there is one. + bool FindImportedMemory(); + + // Allocate the memory. + bool AllocateMemory(); // Processes a single imported function. bool ProcessImportedFunction(Handle instance, @@ -221,9 +224,6 @@ class InstanceBuilder { // Process initialization of globals. void InitGlobals(Handle instance); - // Allocate memory for a module instance as a new JSArrayBuffer. - Handle AllocateMemory(uint32_t initial_pages, - uint32_t maximum_pages); bool NeedsWrappers() const; @@ -243,8 +243,9 @@ class InstanceBuilder { MaybeHandle InstantiateToInstanceObject( Isolate* isolate, ErrorThrower* thrower, Handle module_object, MaybeHandle imports, - MaybeHandle memory) { - InstanceBuilder builder(isolate, thrower, module_object, imports, memory); + MaybeHandle memory_buffer) { + InstanceBuilder builder(isolate, thrower, module_object, imports, + memory_buffer); auto instance = builder.Build(); if (!instance.is_null() && builder.ExecuteStartFunction()) { return instance; @@ -256,14 +257,14 @@ MaybeHandle InstantiateToInstanceObject( InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower, Handle module_object, MaybeHandle ffi, - MaybeHandle memory) + MaybeHandle memory_buffer) : isolate_(isolate), enabled_(module_object->native_module()->enabled_features()), module_(module_object->module()), thrower_(thrower), module_object_(module_object), ffi_(ffi), - memory_(memory) { + memory_buffer_(memory_buffer) { sanitized_imports_.reserve(module_->import_table.size()); } @@ -289,7 +290,7 @@ MaybeHandle InstanceBuilder::Build() { NativeModule* native_module = module_object_->native_module(); //-------------------------------------------------------------------------- - // Allocate the memory array buffer. + // Set up the memory buffer and memory objects. 
//-------------------------------------------------------------------------- uint32_t initial_pages = module_->initial_pages; auto initial_pages_counter = SELECT_WASM_COUNTER( @@ -301,31 +302,23 @@ MaybeHandle InstanceBuilder::Build() { isolate_->counters()->wasm_wasm_max_mem_pages_count(); max_pages_counter->AddSample(module_->maximum_pages); } - // Asm.js has memory_ already set at this point, so we don't want to - // overwrite it. - if (memory_.is_null()) { - memory_ = FindImportedMemoryBuffer(); - } - if (!memory_.is_null()) { - // Set externally passed ArrayBuffer non detachable. - Handle memory = memory_.ToHandleChecked(); - memory->set_is_detachable(false); - DCHECK_IMPLIES(native_module->use_trap_handler(), - module_->origin == kAsmJsOrigin || - memory->is_wasm_memory() || - memory->backing_store() == nullptr); - } else if (initial_pages > 0 || native_module->use_trap_handler()) { - // We need to unconditionally create a guard region if using trap handlers, - // even when the size is zero to prevent null-dereference issues - // (e.g. https://crbug.com/769637). - // Allocate memory if the initial size is more than 0 pages. - memory_ = AllocateMemory(initial_pages, module_->maximum_pages); - if (memory_.is_null()) { - // failed to allocate memory - DCHECK(isolate_->has_pending_exception() || thrower_->error()); - return {}; + if (memory_buffer_.is_null()) { + // Search for imported memory first. + if (!FindImportedMemory()) { + if (!AllocateMemory()) { + DCHECK(isolate_->has_pending_exception() || thrower_->error()); + return {}; + } } + } else { + // Asm.js has {memory_buffer_} already set at this point. + DCHECK_EQ(kAsmJsOrigin, module_->origin); + // asm.js instantiation should have set these flags. + DCHECK(!memory_buffer_.ToHandleChecked()->is_detachable()); + DCHECK(memory_buffer_.ToHandleChecked()->is_asmjs_memory()); + memory_object_ = + WasmMemoryObject::New(isolate_, memory_buffer_, kV8MaxWasmMemoryPages); } //-------------------------------------------------------------------------- @@ -334,33 +327,42 @@ MaybeHandle InstanceBuilder::Build() { TRACE("New module instantiation for %p\n", native_module); Handle instance = WasmInstanceObject::New(isolate_, module_object_); - NativeModuleModificationScope native_modification_scope(native_module); + + //-------------------------------------------------------------------------- + // Attach the memory to the instance. + //-------------------------------------------------------------------------- + if (module_->has_memory) { + DCHECK(!memory_object_.is_null()); + if (!instance->has_memory_object()) { + instance->set_memory_object(*memory_object_); + } + // Add the instance object to the list of instances for this memory. + WasmMemoryObject::AddInstance(isolate_, memory_object_, instance); + + // Double-check the {memory} array buffer matches the instance. + Handle memory = memory_buffer_.ToHandleChecked(); + CHECK_EQ(instance->memory_size(), memory->byte_length()); + CHECK_EQ(instance->memory_start(), memory->backing_store()); + } //-------------------------------------------------------------------------- // Set up the globals for the new instance. 
//-------------------------------------------------------------------------- uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size; if (untagged_globals_buffer_size > 0) { - void* backing_store = isolate_->array_buffer_allocator()->Allocate( - untagged_globals_buffer_size); - if (backing_store == nullptr) { - thrower_->RangeError("Out of memory: wasm globals"); - return {}; - } - untagged_globals_ = isolate_->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, AllocationType::kOld); - constexpr bool is_external = false; - constexpr bool is_wasm_memory = false; - JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external, - backing_store, untagged_globals_buffer_size, - SharedFlag::kNotShared, is_wasm_memory); - if (untagged_globals_.is_null()) { + MaybeHandle result = + isolate_->factory()->NewJSArrayBufferAndBackingStore( + untagged_globals_buffer_size, InitializedFlag::kZeroInitialized, + AllocationType::kOld); + + if (!result.ToHandle(&untagged_globals_)) { thrower_->RangeError("Out of memory: wasm globals"); return {}; } + + instance->set_untagged_globals_buffer(*untagged_globals_); instance->set_globals_start( reinterpret_cast(untagged_globals_->backing_store())); - instance->set_untagged_globals_buffer(*untagged_globals_); } uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size; @@ -426,6 +428,8 @@ MaybeHandle InstanceBuilder::Build() { instance->set_indirect_function_tables(*tables); } + NativeModuleModificationScope native_modification_scope(native_module); + //-------------------------------------------------------------------------- // Process the imports for the module. //-------------------------------------------------------------------------- @@ -451,30 +455,6 @@ MaybeHandle InstanceBuilder::Build() { InitializeExceptions(instance); } - //-------------------------------------------------------------------------- - // Create the WebAssembly.Memory object. - //-------------------------------------------------------------------------- - if (module_->has_memory) { - if (!instance->has_memory_object()) { - // No memory object exists. Create one. - Handle memory_object = WasmMemoryObject::New( - isolate_, memory_, - module_->maximum_pages != 0 ? module_->maximum_pages : -1); - instance->set_memory_object(*memory_object); - } - - // Add the instance object to the list of instances for this memory. - Handle memory_object(instance->memory_object(), isolate_); - WasmMemoryObject::AddInstance(isolate_, memory_object, instance); - - if (!memory_.is_null()) { - // Double-check the {memory} array buffer matches the instance. - Handle memory = memory_.ToHandleChecked(); - CHECK_EQ(instance->memory_size(), memory->byte_length()); - CHECK_EQ(instance->memory_start(), memory->backing_store()); - } - } - // The bulk memory proposal changes the MVP behavior here; the segments are // written as if `memory.init` and `table.init` are executed directly, and // not bounds checked ahead of time. 
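[Editor's note, not part of the patch] In the instantiation flow above, memory is now resolved before imports are processed: Build() either adopts an imported WebAssembly.Memory or allocates a fresh one via AllocateMemory(). A minimal sketch of the allocation half, using only entry points from this patch; `isolate`, the page counts, and the `shared` flag are assumed to be in scope, and error handling is elided:

// Illustration only (hypothetical call site, not patch code).
MaybeHandle<WasmMemoryObject> maybe_memory =
    WasmMemoryObject::New(isolate, initial_pages, maximum_pages, shared);
Handle<WasmMemoryObject> memory_object;
if (!maybe_memory.ToHandle(&memory_object)) {
  return;  // out of memory: New() failed to allocate a BackingStore
}
// The JSArrayBuffer that co-owns the BackingStore hangs off the object:
Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate);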
@@ -807,22 +787,21 @@ void InstanceBuilder::SanitizeImports() { } } -MaybeHandle InstanceBuilder::FindImportedMemoryBuffer() const { +bool InstanceBuilder::FindImportedMemory() { DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size()); for (size_t index = 0; index < module_->import_table.size(); index++) { - const WasmImport& import = module_->import_table[index]; + WasmImport import = module_->import_table[index]; if (import.kind == kExternalMemory) { - const auto& value = sanitized_imports_[index].value; - if (!value->IsWasmMemoryObject()) { - return {}; - } - auto memory = Handle::cast(value); - Handle buffer(memory->array_buffer(), isolate_); - return buffer; + auto& value = sanitized_imports_[index].value; + if (!value->IsWasmMemoryObject()) return false; + memory_object_ = Handle::cast(value); + memory_buffer_ = + Handle(memory_object_->array_buffer(), isolate_); + return true; } } - return {}; + return false; } bool InstanceBuilder::ProcessImportedFunction( @@ -1012,19 +991,19 @@ bool InstanceBuilder::ProcessImportedMemory(Handle instance, Handle module_name, Handle import_name, Handle value) { - // Validation should have failed if more than one memory object was - // provided. - DCHECK(!instance->has_memory_object()); if (!value->IsWasmMemoryObject()) { ReportLinkError("memory import must be a WebAssembly.Memory object", import_index, module_name, import_name); return false; } - auto memory = Handle::cast(value); - instance->set_memory_object(*memory); - Handle buffer(memory->array_buffer(), isolate_); + auto memory_object = Handle::cast(value); + + // The imported memory should have been already set up early. + CHECK_EQ(instance->memory_object(), *memory_object); + + Handle buffer(memory_object_->array_buffer(), isolate_); // memory_ should have already been assigned in Build(). - DCHECK_EQ(*memory_.ToHandleChecked(), *buffer); + DCHECK_EQ(*memory_buffer_.ToHandleChecked(), *buffer); uint32_t imported_cur_pages = static_cast(buffer->byte_length() / kWasmPageSize); if (imported_cur_pages < module_->initial_pages) { @@ -1033,7 +1012,7 @@ bool InstanceBuilder::ProcessImportedMemory(Handle instance, imported_cur_pages); return false; } - int32_t imported_maximum_pages = memory->maximum_pages(); + int32_t imported_maximum_pages = memory_object_->maximum_pages(); if (module_->has_maximum_pages) { if (imported_maximum_pages < 0) { thrower_->LinkError( @@ -1408,27 +1387,27 @@ void InstanceBuilder::InitGlobals(Handle instance) { } // Allocate memory for a module instance as a new JSArrayBuffer. -Handle InstanceBuilder::AllocateMemory(uint32_t initial_pages, - uint32_t maximum_pages) { +bool InstanceBuilder::AllocateMemory() { + auto initial_pages = module_->initial_pages; + auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages : -1; if (initial_pages > max_mem_pages()) { thrower_->RangeError("Out of memory: wasm memory too large"); - return Handle::null(); + return false; } - const bool is_shared_memory = module_->has_shared_memory && enabled_.threads; - Handle mem_buffer; - if (is_shared_memory) { - if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize, - maximum_pages * kWasmPageSize) - .ToHandle(&mem_buffer)) { - thrower_->RangeError("Out of memory: wasm shared memory"); - } - } else { - if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize) - .ToHandle(&mem_buffer)) { - thrower_->RangeError("Out of memory: wasm memory"); - } + auto shared = (module_->has_shared_memory && enabled_.threads) + ? 
SharedFlag::kShared + : SharedFlag::kNotShared; + + MaybeHandle result = + WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared); + + if (!result.ToHandle(&memory_object_)) { + thrower_->RangeError("Out of memory: wasm memory"); + return false; } - return mem_buffer; + memory_buffer_ = + Handle(memory_object_->array_buffer(), isolate_); + return true; } bool InstanceBuilder::NeedsWrappers() const { diff --git a/src/wasm/wasm-code-manager.cc b/src/wasm/wasm-code-manager.cc index 9963aaf9a7..dd14673d69 100644 --- a/src/wasm/wasm-code-manager.cc +++ b/src/wasm/wasm-code-manager.cc @@ -1172,10 +1172,8 @@ NativeModule::~NativeModule() { import_wrapper_cache_.reset(); } -WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker, - size_t max_committed) - : memory_tracker_(memory_tracker), - max_committed_code_space_(max_committed), +WasmCodeManager::WasmCodeManager(size_t max_committed) + : max_committed_code_space_(max_committed), #if defined(V8_OS_WIN_X64) is_win64_unwind_info_disabled_for_testing_(false), #endif @@ -1252,12 +1250,12 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) { DCHECK_GT(size, 0); size_t allocate_page_size = page_allocator->AllocatePageSize(); size = RoundUp(size, allocate_page_size); - if (!memory_tracker_->ReserveAddressSpace(size)) return {}; + if (!BackingStore::ReserveAddressSpace(size)) return {}; if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr(); VirtualMemory mem(page_allocator, size, hint, allocate_page_size); if (!mem.IsReserved()) { - memory_tracker_->ReleaseReservation(size); + BackingStore::ReleaseReservation(size); return {}; } TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(), @@ -1484,7 +1482,7 @@ void WasmCodeManager::FreeNativeModule(Vector owned_code_space, #endif lookup_map_.erase(code_space.address()); - memory_tracker_->ReleaseReservation(code_space.size()); + BackingStore::ReleaseReservation(code_space.size()); code_space.Free(); DCHECK(!code_space.IsReserved()); } diff --git a/src/wasm/wasm-code-manager.h b/src/wasm/wasm-code-manager.h index 3a529c7c48..cf322ac575 100644 --- a/src/wasm/wasm-code-manager.h +++ b/src/wasm/wasm-code-manager.h @@ -38,7 +38,6 @@ class NativeModule; class WasmCodeManager; struct WasmCompilationResult; class WasmEngine; -class WasmMemoryTracker; class WasmImportWrapperCache; struct WasmModule; @@ -603,8 +602,7 @@ class V8_EXPORT_PRIVATE NativeModule final { class V8_EXPORT_PRIVATE WasmCodeManager final { public: - explicit WasmCodeManager(WasmMemoryTracker* memory_tracker, - size_t max_committed); + explicit WasmCodeManager(size_t max_committed); #ifdef DEBUG ~WasmCodeManager() { @@ -653,8 +651,6 @@ class V8_EXPORT_PRIVATE WasmCodeManager final { void AssignRange(base::AddressRegion, NativeModule*); - WasmMemoryTracker* const memory_tracker_; - size_t max_committed_code_space_; #if defined(V8_OS_WIN_X64) diff --git a/src/wasm/wasm-engine.cc b/src/wasm/wasm-engine.cc index 482398415f..3bae7a10d1 100644 --- a/src/wasm/wasm-engine.cc +++ b/src/wasm/wasm-engine.cc @@ -211,8 +211,7 @@ struct WasmEngine::NativeModuleInfo { int8_t num_code_gcs_triggered = 0; }; -WasmEngine::WasmEngine() - : code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {} +WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {} WasmEngine::~WasmEngine() { // Synchronize on all background compile tasks. 
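The WasmCodeManager now draws on the engine-wide address-space budget through static BackingStore methods instead of going through a WasmMemoryTracker instance. A self-contained sketch of that reservation counter, mirroring the compare-and-swap loop of the deleted WasmMemoryTracker::ReserveAddressSpace further down in this patch (the limit value and the names here are illustrative only, not the actual BackingStore internals):

    #include <atomic>
    #include <cstddef>

    constexpr size_t kAddressSpaceLimit = size_t{1} << 40;  // illustrative
    std::atomic<size_t> reserved_address_space{0};

    // Returns true if it is okay to go ahead and reserve num_bytes.
    bool ReserveAddressSpace(size_t num_bytes) {
      size_t old_count = reserved_address_space.load();
      do {
        // Reject requests that would overflow or exceed the limit.
        if (old_count > kAddressSpaceLimit ||
            kAddressSpaceLimit - old_count < num_bytes) {
          return false;
        }
      } while (!reserved_address_space.compare_exchange_weak(
          old_count, old_count + num_bytes));
      return true;
    }

    void ReleaseReservation(size_t num_bytes) {
      reserved_address_space.fetch_sub(num_bytes);
    }

compare_exchange_weak reloads old_count whenever it fails, so concurrent reservations re-check the limit and can never push the counter past it.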
diff --git a/src/wasm/wasm-engine.h b/src/wasm/wasm-engine.h index 49825c19b7..5dd4fc3c00 100644 --- a/src/wasm/wasm-engine.h +++ b/src/wasm/wasm-engine.h @@ -10,7 +10,6 @@ #include "src/tasks/cancelable-task.h" #include "src/wasm/wasm-code-manager.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-tier.h" #include "src/zone/accounting-allocator.h" @@ -23,6 +22,7 @@ class CompilationStatistics; class HeapNumber; class WasmInstanceObject; class WasmModuleObject; +class JSArrayBuffer; namespace wasm { @@ -120,8 +120,6 @@ class V8_EXPORT_PRIVATE WasmEngine { WasmCodeManager* code_manager() { return &code_manager_; } - WasmMemoryTracker* memory_tracker() { return &memory_tracker_; } - AccountingAllocator* allocator() { return &allocator_; } // Compilation statistics for TurboFan compilations. @@ -240,7 +238,6 @@ class V8_EXPORT_PRIVATE WasmEngine { // calling this method. void PotentiallyFinishCurrentGC(); - WasmMemoryTracker memory_tracker_; WasmCodeManager code_manager_; AccountingAllocator allocator_; diff --git a/src/wasm/wasm-js.cc b/src/wasm/wasm-js.cc index 1ee76fc11d..28274fcd51 100644 --- a/src/wasm/wasm-js.cc +++ b/src/wasm/wasm-js.cc @@ -26,7 +26,6 @@ #include "src/wasm/streaming-decoder.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-serialization.h" @@ -1156,7 +1155,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo& args) { return; } - bool is_shared_memory = false; + auto shared = i::SharedFlag::kNotShared; auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate); if (enabled_features.threads) { // Shared property of descriptor @@ -1165,10 +1164,11 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo& args) { descriptor->Get(context, shared_key); v8::Local value; if (maybe_value.ToLocal(&value)) { - is_shared_memory = value->BooleanValue(isolate); + shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared + : i::SharedFlag::kNotShared; } // Throw TypeError if shared is true, and the descriptor has no "maximum" - if (is_shared_memory && maximum == -1) { + if (shared == i::SharedFlag::kShared && maximum == -1) { thrower.TypeError( "If shared is true, maximum property should be defined."); return; @@ -1177,13 +1177,12 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo& args) { i::Handle memory_obj; if (!i::WasmMemoryObject::New(i_isolate, static_cast(initial), - static_cast(maximum), - is_shared_memory) + static_cast(maximum), shared) .ToHandle(&memory_obj)) { thrower.RangeError("could not allocate memory"); return; } - if (is_shared_memory) { + if (shared == i::SharedFlag::kShared) { i::Handle buffer( i::Handle::cast(memory_obj)->array_buffer(), i_isolate); diff --git a/src/wasm/wasm-memory.cc b/src/wasm/wasm-memory.cc deleted file mode 100644 index f203649542..0000000000 --- a/src/wasm/wasm-memory.cc +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. 
- -#include <limits> - -#include "src/heap/heap-inl.h" -#include "src/logging/counters.h" -#include "src/objects/js-array-buffer-inl.h" -#include "src/objects/objects-inl.h" -#include "src/wasm/wasm-engine.h" -#include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" -#include "src/wasm/wasm-module.h" - -namespace v8 { -namespace internal { -namespace wasm { - -namespace { - -constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB - -void AddAllocationStatusSample(Isolate* isolate, - WasmMemoryTracker::AllocationStatus status) { - isolate->counters()->wasm_memory_allocation_result()->AddSample( - static_cast<int>(status)); -} - -bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap, - bool* did_retry) { - // Try up to three times; getting rid of dead JSArrayBuffer allocations might - // require two GCs because the first GC may be incremental and may have - // floating garbage. - static constexpr int kAllocationRetries = 2; - - for (int trial = 0;; ++trial) { - if (fn()) return true; - // {fn} failed. If {kAllocationRetries} is reached, fail. - *did_retry = true; - if (trial == kAllocationRetries) return false; - // Otherwise, collect garbage and retry. - // TODO(wasm): Since reservation limits are engine-wide, we should do an - // engine-wide GC here (i.e. trigger a GC in each isolate using the engine, - // and wait for them all to finish). See https://crbug.com/v8/9405. - heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true); - } -} - -void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap, - size_t size, size_t max_size, - void** allocation_base, - size_t* allocation_length) { - using AllocationStatus = WasmMemoryTracker::AllocationStatus; -#if V8_TARGET_ARCH_64_BIT - constexpr bool kRequireFullGuardRegions = true; -#else - constexpr bool kRequireFullGuardRegions = false; -#endif - // Let the WasmMemoryTracker know we are going to reserve a bunch of - // address space. - size_t reservation_size = std::max(max_size, size); - bool did_retry = false; - - auto reserve_memory_space = [&] { - // For guard regions, we always allocate the largest possible offset - // into the heap, so the addressable memory after the guard page can - // be made inaccessible. - // - // To protect against 32-bit integer overflow issues, we also - // protect the 2GiB before the valid part of the memory buffer. - *allocation_length = - kRequireFullGuardRegions - ? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize()) - : RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size), - kWasmPageSize); - DCHECK_GE(*allocation_length, size); - DCHECK_GE(*allocation_length, kWasmPageSize); - - return memory_tracker->ReserveAddressSpace(*allocation_length); - }; - if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) { - // Reset reservation_size to the initial size so that at least the initial - // size can be allocated if the maximum size reservation is not possible. - reservation_size = size; - - // We are over the address space limit. Fail. - // - // When running under the correctness fuzzer (i.e. - // --correctness-fuzzer-suppressions is present), we crash - // instead so it is not incorrectly reported as a correctness - // violation. See https://crbug.com/828293#c4 - if (FLAG_correctness_fuzzer_suppressions) { - FATAL("could not allocate wasm memory"); - } - AddAllocationStatusSample( - heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure); - return nullptr; - } - - // The Reserve makes the whole region inaccessible by default.
- DCHECK_NULL(*allocation_base); - auto allocate_pages = [&] { - *allocation_base = - AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length, - kWasmPageSize, PageAllocator::kNoAccess); - return *allocation_base != nullptr; - }; - if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) { - memory_tracker->ReleaseReservation(*allocation_length); - AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure); - return nullptr; - } - - byte* memory = reinterpret_cast(*allocation_base); - if (kRequireFullGuardRegions) { - memory += kNegativeGuardSize; - } - - // Make the part we care about accessible. - auto commit_memory = [&] { - return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory, - RoundUp(size, kWasmPageSize), - PageAllocator::kReadWrite); - }; - // SetPermissions commits the extra memory, which may put us over the - // process memory limit. If so, report this as an OOM. - if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) { - V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore"); - } - - memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base, - *allocation_length, memory, size); - AddAllocationStatusSample(heap->isolate(), - did_retry ? AllocationStatus::kSuccessAfterRetry - : AllocationStatus::kSuccess); - return memory; -} - -#if V8_TARGET_ARCH_MIPS64 -// MIPS64 has a user space of 2^40 bytes on most processors, -// address space limits needs to be smaller. -constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB -#elif V8_TARGET_ARCH_64_BIT -constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB -#else -constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB -#endif - -} // namespace - -WasmMemoryTracker::~WasmMemoryTracker() { - // All reserved address space should be released before the allocation tracker - // is destroyed. - DCHECK_EQ(reserved_address_space_, 0u); - DCHECK_EQ(allocated_address_space_, 0u); - DCHECK(allocations_.empty()); -} - -void* WasmMemoryTracker::TryAllocateBackingStoreForTesting( - Heap* heap, size_t size, void** allocation_base, - size_t* allocation_length) { - return TryAllocateBackingStore(this, heap, size, size, allocation_base, - allocation_length); -} - -void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory, - void* buffer_start) { - base::MutexGuard scope_lock(&mutex_); - ReleaseAllocation_Locked(nullptr, buffer_start); - CHECK(FreePages(GetPlatformPageAllocator(), - reinterpret_cast(memory.begin()), memory.size())); -} - -bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) { - size_t reservation_limit = kAddressSpaceLimit; - while (true) { - size_t old_count = reserved_address_space_.load(); - if (old_count > reservation_limit) return false; - if (reservation_limit - old_count < num_bytes) return false; - if (reserved_address_space_.compare_exchange_weak(old_count, - old_count + num_bytes)) { - return true; - } - } -} - -void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) { - size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes); - USE(old_reserved); - DCHECK_LE(num_bytes, old_reserved); -} - -void WasmMemoryTracker::RegisterAllocation(Isolate* isolate, - void* allocation_base, - size_t allocation_length, - void* buffer_start, - size_t buffer_length) { - base::MutexGuard scope_lock(&mutex_); - - allocated_address_space_ += allocation_length; - // Report address space usage in MiB so the full range fits in an int on all - // platforms. 
- isolate->counters()->wasm_address_space_usage_mb()->AddSample( - static_cast(allocated_address_space_ / MB)); - - allocations_.emplace(buffer_start, - AllocationData{allocation_base, allocation_length, - buffer_start, buffer_length}); -} - -WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked( - Isolate* isolate, const void* buffer_start) { - auto find_result = allocations_.find(buffer_start); - CHECK_NE(find_result, allocations_.end()); - - size_t num_bytes = find_result->second.allocation_length; - DCHECK_LE(num_bytes, reserved_address_space_); - DCHECK_LE(num_bytes, allocated_address_space_); - reserved_address_space_ -= num_bytes; - allocated_address_space_ -= num_bytes; - - AllocationData allocation_data = find_result->second; - allocations_.erase(find_result); - return allocation_data; -} - -const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData( - const void* buffer_start) { - base::MutexGuard scope_lock(&mutex_); - const auto& result = allocations_.find(buffer_start); - if (result != allocations_.end()) { - return &result->second; - } - return nullptr; -} - -bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) { - base::MutexGuard scope_lock(&mutex_); - return allocations_.find(buffer_start) != allocations_.end(); -} - -bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) { - base::MutexGuard scope_lock(&mutex_); - const auto& result = allocations_.find(buffer_start); - // Should be a wasm allocation, and registered as a shared allocation. - return (result != allocations_.end() && result->second.is_shared); -} - -void WasmMemoryTracker::MarkWasmMemoryNotGrowable( - Handle buffer) { - base::MutexGuard scope_lock(&mutex_); - const auto& allocation = allocations_.find(buffer->backing_store()); - if (allocation == allocations_.end()) return; - allocation->second.is_growable = false; -} - -bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle buffer) { - base::MutexGuard scope_lock(&mutex_); - if (buffer->backing_store() == nullptr) return true; - const auto& allocation = allocations_.find(buffer->backing_store()); - if (allocation == allocations_.end()) return false; - return allocation->second.is_growable; -} - -bool WasmMemoryTracker::FreeWasmMemory(Isolate* isolate, - const void* buffer_start) { - base::MutexGuard scope_lock(&mutex_); - const auto& result = allocations_.find(buffer_start); - if (result == allocations_.end()) return false; - if (result->second.is_shared) { - // This is a shared WebAssembly.Memory allocation - FreeMemoryIfNotShared_Locked(isolate, buffer_start); - return true; - } - // This is a WebAssembly.Memory allocation - const AllocationData allocation = - ReleaseAllocation_Locked(isolate, buffer_start); - CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base, - allocation.allocation_length)); - return true; -} - -void WasmMemoryTracker::RegisterWasmMemoryAsShared( - Handle object, Isolate* isolate) { - // Only register with the tracker if shared grow is enabled. - if (!FLAG_wasm_grow_shared_memory) return; - const void* backing_store = object->array_buffer().backing_store(); - // TODO(V8:8810): This should be a DCHECK, currently some tests do not - // use a full WebAssembly.Memory, and fail on registering so return early. - if (!IsWasmMemory(backing_store)) return; - { - base::MutexGuard scope_lock(&mutex_); - // Register as shared allocation when it is post messaged. 
This happens only - // the first time a buffer is shared over PostMessage, and track all the - // memory objects that are associated with this backing store. - RegisterSharedWasmMemory_Locked(object, isolate); - // Add isolate to backing store mapping. - isolates_per_buffer_[backing_store].emplace(isolate); - } -} - -void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer, - size_t new_size) { - base::MutexGuard scope_lock(&mutex_); - // Keep track of the new size of the buffer associated with each backing - // store. - AddBufferToGrowMap_Locked(old_buffer, new_size); - // Request a GROW_SHARED_MEMORY interrupt on the other isolates. - TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer); -} - -void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) { - base::MutexGuard scope_lock(&mutex_); - // For every buffer in the grow_update_map_, update the size for all the - // memory objects associated with this isolate. - for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) { - UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second); - // If all the isolates that share this buffer have hit a stack check, their - // memory objects are updated, and this grow entry can be erased. - if (AreAllIsolatesUpdated_Locked(it->first)) { - it = grow_update_map_.erase(it); - } else { - it++; - } - } -} - -void WasmMemoryTracker::RegisterSharedWasmMemory_Locked( - Handle<WasmMemoryObject> object, Isolate* isolate) { - DCHECK(object->array_buffer().is_shared()); - - void* backing_store = object->array_buffer().backing_store(); - // The allocation of a WasmMemoryObject should always be registered with the - // WasmMemoryTracker. - const auto& result = allocations_.find(backing_store); - if (result == allocations_.end()) return; - - // Register the allocation as shared, if not already marked as shared. - if (!result->second.is_shared) result->second.is_shared = true; - - // Create persistent global handles for the memory objects that are shared. - GlobalHandles* global_handles = isolate->global_handles(); - object = global_handles->Create(*object); - - // Add to memory_object_vector to track memory objects and instance objects - // that will need to be updated on a Grow call. - result->second.memory_object_vector.push_back( - SharedMemoryObjectState(object, isolate)); -} - -void WasmMemoryTracker::AddBufferToGrowMap_Locked( - Handle<JSArrayBuffer> old_buffer, size_t new_size) { - void* backing_store = old_buffer->backing_store(); - auto entry = grow_update_map_.find(old_buffer->backing_store()); - if (entry == grow_update_map_.end()) { - // No pending grow for this backing store; add it to the map. - grow_update_map_.emplace(backing_store, new_size); - return; - } - // If grow on the same buffer is requested before the update is complete, - // the new_size should always be greater than or equal to the old_size. Equal - // in the case that grow(0) is called, but new buffer handles are mandated - // by the spec. - CHECK_LE(entry->second, new_size); - entry->second = new_size; - // Flush instances_updated every time a new grow size needs to be recorded. - ClearUpdatedInstancesOnPendingGrow_Locked(backing_store); -} - -void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked( - Handle<JSArrayBuffer> old_buffer) { - // Request a GrowSharedMemory interrupt on all the isolates that share - // the backing store.
- const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store()); - for (const auto& isolate : isolates->second) { - isolate->stack_guard()->RequestGrowSharedMemory(); - } -} - -void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked( - Isolate* isolate, void* backing_store, size_t new_size) { - // Update objects only if there are memory objects that share this backing - // store, and this isolate is marked as one of the isolates that shares this - // buffer. - if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) { - UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size); - // As the memory objects are updated, add this isolate to a set of isolates - // that are updated on grow. This state is maintained to track if all the - // isolates that share the backing store have hit a StackCheck. - isolates_updated_on_grow_[backing_store].emplace(isolate); - } -} - -bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked( - const void* backing_store) { - const auto& buffer_isolates = isolates_per_buffer_.find(backing_store); - // No isolates share this buffer. - if (buffer_isolates == isolates_per_buffer_.end()) return true; - const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store); - // Some isolates share the buffer, but no isolates have been updated yet. - if (updated_isolates == isolates_updated_on_grow_.end()) return false; - if (buffer_isolates->second == updated_isolates->second) { - // If all the isolates that share this backing_store have hit a stack check, - // and the memory objects have been updated, remove the entry from the - // updatemap, and return true. - isolates_updated_on_grow_.erase(backing_store); - return true; - } - return false; -} - -void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked( - const void* backing_store) { - // On multiple grows to the same buffer, the entries for that buffer should be - // flushed. This is done so that any consecutive grows to the same buffer will - // update all instances that share this buffer. - const auto& value = isolates_updated_on_grow_.find(backing_store); - if (value != isolates_updated_on_grow_.end()) { - value->second.clear(); - } -} - -void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked( - Isolate* isolate, void* backing_store, size_t new_size) { - const auto& result = allocations_.find(backing_store); - if (result == allocations_.end() || !result->second.is_shared) return; - for (const auto& memory_obj_state : result->second.memory_object_vector) { - DCHECK_NE(memory_obj_state.isolate, nullptr); - if (isolate == memory_obj_state.isolate) { - HandleScope scope(isolate); - Handle memory_object = memory_obj_state.memory_object; - DCHECK(memory_object->IsWasmMemoryObject()); - DCHECK(memory_object->array_buffer().is_shared()); - // Permissions adjusted, but create a new buffer with new size - // and old attributes. Buffer has already been allocated, - // just create a new buffer with same backing store. - bool is_external = memory_object->array_buffer().is_external(); - Handle new_buffer = SetupArrayBuffer( - isolate, backing_store, new_size, is_external, SharedFlag::kShared); - memory_obj_state.memory_object->update_instances(isolate, new_buffer); - } - } -} - -bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked( - Isolate* isolate, const void* backing_store) { - // Return true if this buffer has memory_objects it needs to update. 
- const auto& result = allocations_.find(backing_store); - if (result == allocations_.end() || !result->second.is_shared) return false; - // Only update if the buffer has memory objects that need to be updated. - if (result->second.memory_object_vector.empty()) return false; - const auto& isolate_entry = isolates_per_buffer_.find(backing_store); - return (isolate_entry != isolates_per_buffer_.end() && - isolate_entry->second.count(isolate) != 0); -} - -void WasmMemoryTracker::FreeMemoryIfNotShared_Locked( - Isolate* isolate, const void* backing_store) { - RemoveSharedBufferState_Locked(isolate, backing_store); - if (CanFreeSharedMemory_Locked(backing_store)) { - const AllocationData allocation = - ReleaseAllocation_Locked(isolate, backing_store); - CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base, - allocation.allocation_length)); - } -} - -bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) { - const auto& value = isolates_per_buffer_.find(backing_store); - // If no isolates share this buffer, the backing store can be freed; - // erase the buffer entry. - if (value == isolates_per_buffer_.end() || value->second.empty()) return true; - return false; -} - -void WasmMemoryTracker::RemoveSharedBufferState_Locked( - Isolate* isolate, const void* backing_store) { - if (isolate != nullptr) { - DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store); - RemoveIsolateFromBackingStore_Locked(isolate, backing_store); - } else { - // This happens for externalized contents; clean up shared memory state - // associated with this buffer across isolates. - DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store); - } -} - -void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - const void* backing_store) { - const auto& result = allocations_.find(backing_store); - CHECK(result != allocations_.end() && result->second.is_shared); - auto& object_vector = result->second.memory_object_vector; - if (object_vector.empty()) return; - for (const auto& mem_obj_state : object_vector) { - GlobalHandles::Destroy(mem_obj_state.memory_object.location()); - } - object_vector.clear(); - // Remove isolate from backing store map. - isolates_per_buffer_.erase(backing_store); -} - -void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - Isolate* isolate, const void* backing_store) { - // This gets called when an internal handle to the ArrayBuffer should be - // freed; on heap teardown for that isolate, remove the memory objects - // that are associated with this buffer and isolate.
- const auto& result = allocations_.find(backing_store); - CHECK(result != allocations_.end() && result->second.is_shared); - auto& object_vector = result->second.memory_object_vector; - if (object_vector.empty()) return; - for (auto it = object_vector.begin(); it != object_vector.end();) { - if (isolate == it->isolate) { - GlobalHandles::Destroy(it->memory_object.location()); - it = object_vector.erase(it); - } else { - ++it; - } - } -} - -void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked( - Isolate* isolate, const void* backing_store) { - const auto& isolates = isolates_per_buffer_.find(backing_store); - if (isolates == isolates_per_buffer_.end() || isolates->second.empty()) - return; - isolates->second.erase(isolate); -} - -void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) { - base::MutexGuard scope_lock(&mutex_); - // This is possible for buffers that are externalized, and their handles have - // been freed, the backing store wasn't released because externalized contents - // were using it. - if (isolates_per_buffer_.empty()) return; - for (auto& entry : isolates_per_buffer_) { - if (entry.second.find(isolate) == entry.second.end()) continue; - const void* backing_store = entry.first; - entry.second.erase(isolate); - DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store); - } - for (auto& buffer_isolates : isolates_updated_on_grow_) { - auto& isolates = buffer_isolates.second; - isolates.erase(isolate); - } -} - -Handle SetupArrayBuffer(Isolate* isolate, void* backing_store, - size_t size, bool is_external, - SharedFlag shared) { - Handle buffer = - isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld); - constexpr bool is_wasm_memory = true; - JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size, - shared, is_wasm_memory); - buffer->set_is_detachable(false); - return buffer; -} - -MaybeHandle AllocateAndSetupArrayBuffer(Isolate* isolate, - size_t size, - size_t maximum_size, - SharedFlag shared) { - // Enforce flag-limited maximum allocation size. - if (size > max_mem_bytes()) return {}; - - WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker(); - - // Set by TryAllocateBackingStore or GetEmptyBackingStore - void* allocation_base = nullptr; - size_t allocation_length = 0; - - void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size, - maximum_size, &allocation_base, - &allocation_length); - if (memory == nullptr) return {}; - -#if DEBUG - // Double check the API allocator actually zero-initialized the memory. - const byte* bytes = reinterpret_cast(memory); - for (size_t i = 0; i < size; ++i) { - DCHECK_EQ(0, bytes[i]); - } -#endif - - reinterpret_cast(isolate) - ->AdjustAmountOfExternalAllocatedMemory(size); - - constexpr bool is_external = false; - return SetupArrayBuffer(isolate, memory, size, is_external, shared); -} - -MaybeHandle NewArrayBuffer(Isolate* isolate, size_t size) { - return AllocateAndSetupArrayBuffer(isolate, size, size, - SharedFlag::kNotShared); -} - -MaybeHandle NewSharedArrayBuffer(Isolate* isolate, - size_t initial_size, - size_t max_size) { - return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size, - SharedFlag::kShared); -} - -void DetachMemoryBuffer(Isolate* isolate, Handle buffer, - bool free_memory) { - if (buffer->is_shared()) return; // Detaching shared buffers is impossible. 
- DCHECK(!buffer->is_detachable()); - - const bool is_external = buffer->is_external(); - DCHECK(!buffer->is_detachable()); - if (!is_external) { - buffer->set_is_external(true); - isolate->heap()->UnregisterArrayBuffer(*buffer); - if (free_memory) { - // We need to free the memory before detaching the buffer because - // FreeBackingStore reads buffer->allocation_base(), which is nulled out - // by Detach. This means there is a dangling pointer until we detach the - // buffer. Since there is no way for the user to directly call - // FreeBackingStore, we can ensure this is safe. - buffer->FreeBackingStoreFromMainThread(); - } - } - - DCHECK(buffer->is_external()); - buffer->set_is_wasm_memory(false); - buffer->set_is_detachable(true); - buffer->Detach(); -} - -} // namespace wasm -} // namespace internal -} // namespace v8 diff --git a/src/wasm/wasm-memory.h b/src/wasm/wasm-memory.h deleted file mode 100644 index ecb6203ac5..0000000000 --- a/src/wasm/wasm-memory.h +++ /dev/null @@ -1,289 +0,0 @@ -// Copyright 2017 the V8 project authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef V8_WASM_WASM_MEMORY_H_ -#define V8_WASM_WASM_MEMORY_H_ - -#include -#include -#include - -#include "src/base/platform/mutex.h" -#include "src/flags/flags.h" -#include "src/handles/handles.h" -#include "src/objects/js-array-buffer.h" - -namespace v8 { -namespace internal { -namespace wasm { - -// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory -// and wasm code. There is an upper limit on the total reserved memory which is -// checked by this class. Allocations are stored so we can look them up when an -// array buffer dies and figure out the reservation and allocation bounds for -// that buffer. -class WasmMemoryTracker { - public: - WasmMemoryTracker() = default; - V8_EXPORT_PRIVATE ~WasmMemoryTracker(); - - // ReserveAddressSpace attempts to increase the reserved address space counter - // by {num_bytes}. Returns true if successful (meaning it is okay to go ahead - // and reserve {num_bytes} bytes), false otherwise. - bool ReserveAddressSpace(size_t num_bytes); - - void RegisterAllocation(Isolate* isolate, void* allocation_base, - size_t allocation_length, void* buffer_start, - size_t buffer_length); - - struct SharedMemoryObjectState { - Handle memory_object; - Isolate* isolate; - - SharedMemoryObjectState() = default; - SharedMemoryObjectState(Handle memory_object, - Isolate* isolate) - : memory_object(memory_object), isolate(isolate) {} - }; - - struct AllocationData { - void* allocation_base = nullptr; - size_t allocation_length = 0; - void* buffer_start = nullptr; - size_t buffer_length = 0; - bool is_shared = false; - // Wasm memories are growable by default, this will be false only when - // shared with an asmjs module. - bool is_growable = true; - - // Track Wasm Memory instances across isolates, this is populated on - // PostMessage using persistent handles for memory objects. 
- std::vector<SharedMemoryObjectState> - memory_object_vector; - - private: - AllocationData() = default; - AllocationData(void* allocation_base, size_t allocation_length, - void* buffer_start, size_t buffer_length) - : allocation_base(allocation_base), - allocation_length(allocation_length), - buffer_start(buffer_start), - buffer_length(buffer_length) { - DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base), - reinterpret_cast<uintptr_t>(buffer_start)); - DCHECK_GE( - reinterpret_cast<uintptr_t>(allocation_base) + allocation_length, - reinterpret_cast<uintptr_t>(buffer_start)); - DCHECK_GE( - reinterpret_cast<uintptr_t>(allocation_base) + allocation_length, - reinterpret_cast<uintptr_t>(buffer_start) + buffer_length); - } - - friend WasmMemoryTracker; - }; - - // Allow tests to allocate a backing store the same way as we do it for - // WebAssembly memory. This is used in unit tests for the trap handler to - // generate the same signals/exceptions for invalid memory accesses as - // we would get with WebAssembly memory. - V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting( - Heap* heap, size_t size, void** allocation_base, - size_t* allocation_length); - - // Free memory allocated with TryAllocateBackingStoreForTesting. - V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory, - void* buffer_start); - - // Decreases the amount of reserved address space. - void ReleaseReservation(size_t num_bytes); - - V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start); - - bool IsWasmSharedMemory(const void* buffer_start); - - // Returns a pointer to a Wasm buffer's allocation data, or nullptr if the - // buffer is not tracked. - V8_EXPORT_PRIVATE const AllocationData* FindAllocationData( - const void* buffer_start); - - // Free memory allocated by the Wasm memory tracker. - bool FreeWasmMemory(Isolate* isolate, const void* buffer_start); - - void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer); - - bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer); - - // When WebAssembly.Memory is transferred over PostMessage, register the - // allocation as shared and track the memory objects that will need - // updating if memory is resized. - void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object, - Isolate* isolate); - - // This method is called when the underlying backing store is grown, but - // instances that share the backing_store have not yet been updated. - void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer, - size_t new_size); - - // Interrupt handler for the GROW_SHARED_MEMORY interrupt. Update memory - // objects and instances that share the memory objects after a Grow call. - void UpdateSharedMemoryInstances(Isolate* isolate); - - // Due to the timing of when buffers are garbage collected vs. when isolate - // object handles are destroyed, it is possible to leak global handles. To - // avoid this, clean up any remaining global handles on isolate destruction. - void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate); - - // Allocation results are reported to UMA. - // - // See wasm_memory_allocation_result in counters.h - enum class AllocationStatus { - kSuccess, // Succeeded on the first try - - kSuccessAfterRetry, // Succeeded after garbage collection - - kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address - // space limit - - kOtherFailure // Failed for an unknown reason - }; - - private: - // Helper methods to free memory only if it is not shared by other isolates - // or memory objects. - void FreeMemoryIfNotShared_Locked(Isolate* isolate, - const void* backing_store); - bool CanFreeSharedMemory_Locked(const void* backing_store); - void RemoveSharedBufferState_Locked(Isolate* isolate, - const void* backing_store); - - // Registers the allocation as shared, and tracks all the memory objects - // associated with this allocation across isolates. - void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object, - Isolate* isolate); - - // Map the new size after grow to the buffer backing store, so that instances - // and memory objects that share the WebAssembly.Memory across isolates can - // be updated. - void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer, - size_t new_size); - - // Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have - // memory objects that share this buffer. - void TriggerSharedGrowInterruptOnAllIsolates_Locked( - Handle<JSArrayBuffer> old_buffer); - - // When isolates hit a stack check, update the memory objects associated with - // that isolate. - void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate, - void* backing_store, - size_t new_size); - - // Check if all the isolates that share a backing_store have hit a stack - // check. If a stack check is hit, and the backing store is pending grow, - // this isolate will have updated memory objects. - bool AreAllIsolatesUpdated_Locked(const void* backing_store); - - // If a grow call is made to a buffer with a pending grow, and all the - // isolates that share this buffer have not hit a StackCheck, clear the set - // of already updated instances so they can be updated with the new size on - // the most recent grow call. - void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store); - - // Helper functions to update memory objects on grow, and maintain state for - // which isolates hit a stack check. - void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate, - void* backing_store, - size_t new_size); - bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate, - const void* backing_store); - - // Destroy global handles to memory objects, and remove the backing store - // from isolates_per_buffer on Free. - void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - Isolate* isolate, const void* backing_store); - void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked( - const void* backing_store); - - void RemoveIsolateFromBackingStore_Locked(Isolate* isolate, - const void* backing_store); - - // Removes an allocation from the tracker. - AllocationData ReleaseAllocation_Locked(Isolate* isolate, - const void* buffer_start); - - // Clients use a two-part process. First they "reserve" the address space, - // which signifies an intent to actually allocate it. This determines whether - // doing the allocation would put us over our limit. Once there is a - // reservation, clients can do the allocation and register the result. - // - // We should always have: - // allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit - std::atomic<size_t> reserved_address_space_{0}; - - // Used to protect access to the allocated address space counter and - // allocation map. This is needed because Wasm memories can be freed on - // another thread by the ArrayBufferTracker. - base::Mutex mutex_; - - size_t allocated_address_space_ = 0; - - ////////////////////////////////////////////////////////////////////////////// - // Protected by {mutex_}: - - // Track Wasm memory allocation information. This is keyed by the start of - // the buffer, rather than by the start of the allocation. - std::unordered_map<const void*, AllocationData> allocations_; - - // Maps each buffer to the isolates that share the backing store. - std::unordered_map<const void*, std::unordered_set<Isolate*>> - isolates_per_buffer_; - - // Maps which isolates have had a grow interrupt handled on the buffer. This - // is maintained to ensure that the instances are updated with the right size - // on Grow. - std::unordered_map<const void*, std::unordered_set<Isolate*>> - isolates_updated_on_grow_; - - // Maps backing stores (void*) to the size (size_t) of the underlying memory. - // An entry in this map is made on a grow call to the corresponding backing - // store. On consecutive grow calls to the same backing store, - // the size entry is updated. This entry is made right after the mprotect - // call to change the protections on a backing_store, so the memory objects - // have not been updated yet. The backing store entry in this map is erased - // when all the memory objects, or instances that share this backing store, - // have their bounds updated. - std::unordered_map<const void*, size_t> grow_update_map_; - - // End of fields protected by {mutex_}. - ////////////////////////////////////////////////////////////////////////////// - - DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker); -}; - -// Attempts to allocate an array buffer with guard regions suitable for trap -// handling. If address space is not available, it will return a buffer with -// mini-guards that will require bounds checks. -V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*, - size_t size); - -// Attempts to allocate a SharedArrayBuffer with guard regions suitable for -// trap handling. If address space is not available, it will try to reserve -// up to the maximum for that memory. If all else fails, it will return a -// buffer with mini-guards of initial size. -V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer( - Isolate*, size_t initial_size, size_t max_size); - -Handle<JSArrayBuffer> SetupArrayBuffer( - Isolate*, void* backing_store, size_t size, bool is_external, - SharedFlag shared = SharedFlag::kNotShared); - -V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate, - Handle<JSArrayBuffer> buffer, - bool free_memory); - -} // namespace wasm -} // namespace internal -} // namespace v8 - -#endif // V8_WASM_WASM_MEMORY_H_ diff --git a/src/wasm/wasm-module.cc b/src/wasm/wasm-module.cc index 05057301ed..aef9180b9a 100644 --- a/src/wasm/wasm-module.cc +++ b/src/wasm/wasm-module.cc @@ -272,21 +272,19 @@ Handle<JSArray> GetCustomSections(Isolate* isolate, // Make a copy of the payload data in the section. size_t size = section.payload.length(); - void* memory = - size == 0 ?
nullptr : isolate->array_buffer_allocator()->Allocate(size); - - if (size && !memory) { + MaybeHandle result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + size, InitializedFlag::kUninitialized); + Handle array_buffer; + if (!result.ToHandle(&array_buffer)) { thrower->RangeError("out of memory allocating custom section data"); return Handle(); } - Handle buffer = - isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - constexpr bool is_external = false; - JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size); - memcpy(memory, wire_bytes.begin() + section.payload.offset(), + memcpy(array_buffer->backing_store(), + wire_bytes.begin() + section.payload.offset(), section.payload.length()); - matching_sections.push_back(buffer); + matching_sections.push_back(array_buffer); } int num_custom_sections = static_cast(matching_sections.size()); diff --git a/src/wasm/wasm-objects.cc b/src/wasm/wasm-objects.cc index ebadb1fbdf..654b795cf7 100644 --- a/src/wasm/wasm-objects.cc +++ b/src/wasm/wasm-objects.cc @@ -25,7 +25,6 @@ #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-engine.h" #include "src/wasm/wasm-limits.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" #include "src/wasm/wasm-text.h" @@ -1233,66 +1232,17 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate, } namespace { -bool AdjustBufferPermissions(Isolate* isolate, Handle old_buffer, - size_t new_size) { - if (new_size > old_buffer->allocation_length()) return false; - void* old_mem_start = old_buffer->backing_store(); - size_t old_size = old_buffer->byte_length(); - if (old_size != new_size) { - DCHECK_NOT_NULL(old_mem_start); - DCHECK_GE(new_size, old_size); - // If adjusting permissions fails, propagate error back to return - // failure to grow. - if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size, - PageAllocator::kReadWrite)) { - return false; - } - reinterpret_cast(isolate) - ->AdjustAmountOfExternalAllocatedMemory(new_size - old_size); - } - return true; -} -MaybeHandle MemoryGrowBuffer(Isolate* isolate, - Handle old_buffer, - size_t new_size) { - CHECK_EQ(0, new_size % wasm::kWasmPageSize); - // Reusing the backing store from externalized buffers causes problems with - // Blink's array buffers. The connection between the two is lost, which can - // lead to Blink not knowing about the other reference to the buffer and - // freeing it too early. - if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) { - // We couldn't reuse the old backing store, so create a new one and copy the - // old contents in. - Handle new_buffer; - if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) { - return {}; - } - void* old_mem_start = old_buffer->backing_store(); - size_t old_size = old_buffer->byte_length(); - if (old_size == 0) return new_buffer; - memcpy(new_buffer->backing_store(), old_mem_start, old_size); - DCHECK(old_buffer.is_null() || !old_buffer->is_shared()); - constexpr bool free_memory = true; - i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory); - return new_buffer; - } else { - if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {}; - // NOTE: We must allocate a new array buffer here because the spec - // assumes that ArrayBuffers do not change size. - void* backing_store = old_buffer->backing_store(); - bool is_external = old_buffer->is_external(); - // Disconnect buffer early so GC won't free it. 
- i::wasm::DetachMemoryBuffer(isolate, old_buffer, false); - Handle new_buffer = - wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external); - return new_buffer; - } -} - -// May GC, because SetSpecializationMemInfoFrom may GC void SetInstanceMemory(Handle instance, Handle buffer) { + bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin; + bool use_trap_handler = + instance->module_object().native_module()->use_trap_handler(); + // Wasm modules compiled to use the trap handler don't have bounds checks, + // so they must have a memory that has guard regions. + CHECK_IMPLIES(is_wasm_module && use_trap_handler, + buffer->GetBackingStore()->has_guard_regions()); + instance->SetRawMemory(reinterpret_cast(buffer->backing_store()), buffer->byte_length()); #if DEBUG @@ -1310,7 +1260,6 @@ void SetInstanceMemory(Handle instance, } #endif } - } // namespace Handle WasmMemoryObject::New( @@ -1318,44 +1267,47 @@ Handle WasmMemoryObject::New( uint32_t maximum) { Handle buffer; if (!maybe_buffer.ToHandle(&buffer)) { - // If no buffer was provided, create a 0-length one. - buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false); + // If no buffer was provided, create a zero-length one. + auto clamped_maximum = + std::min(static_cast(maximum), wasm::kV8MaxWasmMemoryPages); + auto backing_store = BackingStore::AllocateWasmMemory( + isolate, 0, clamped_maximum, SharedFlag::kNotShared); + buffer = isolate->factory()->NewJSArrayBuffer(); + buffer->Attach(std::move(backing_store)); } - // TODO(kschimpf): Do we need to add an argument that defines the - // style of memory the user prefers (with/without trap handling), so - // that the memory will match the style of the compiled wasm module. - // See issue v8:7143 Handle memory_ctor( isolate->native_context()->wasm_memory_constructor(), isolate); - auto memory_obj = Handle::cast( + auto memory_object = Handle::cast( isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld)); - memory_obj->set_array_buffer(*buffer); - memory_obj->set_maximum_pages(maximum); + memory_object->set_array_buffer(*buffer); + memory_object->set_maximum_pages(maximum); - return memory_obj; + if (buffer->is_shared()) { + auto backing_store = buffer->GetBackingStore(); + backing_store->AttachSharedWasmMemoryObject(isolate, memory_object); + } + + return memory_object; } MaybeHandle WasmMemoryObject::New(Isolate* isolate, uint32_t initial, uint32_t maximum, - bool is_shared_memory) { - Handle buffer; - size_t size = static_cast(i::wasm::kWasmPageSize) * - static_cast(initial); - if (is_shared_memory) { - size_t max_size = static_cast(i::wasm::kWasmPageSize) * - static_cast(maximum); - if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size) - .ToHandle(&buffer)) { - return {}; - } - } else { - if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) { - return {}; - } - } + SharedFlag shared) { + auto backing_store = + BackingStore::AllocateWasmMemory(isolate, initial, maximum, shared); + + if (!backing_store) return {}; + + Handle buffer = + (shared == SharedFlag::kShared) + ? 
isolate->factory()->NewJSSharedArrayBuffer() + : isolate->factory()->NewJSArrayBuffer(); + + buffer->Attach(std::move(backing_store)); + return New(isolate, buffer, maximum); } @@ -1399,11 +1351,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, uint32_t pages) { TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory"); Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate); - if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1; - auto* memory_tracker = isolate->wasm_engine()->memory_tracker(); - if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1; + // Any buffer used as an asm.js memory cannot be detached, and + // therefore this memory cannot be grown. + if (old_buffer->is_asmjs_memory()) return -1; - // Checks for maximum memory size, compute new size. + // Checks for maximum memory size. uint32_t maximum_pages = wasm::max_mem_pages(); if (memory_object->has_maximum_pages()) { maximum_pages = std::min( @@ -1418,47 +1370,49 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate, (pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit return -1; } + // Compute the new size. size_t new_size = static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize; - // Memory is grown, but the memory objects and instances are not yet updated. - // Handle this in the interrupt handler so that all the isolates that share - // this buffer are updated safely. - Handle<JSArrayBuffer> new_buffer; + std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore(); + if (!backing_store) return -1; + + // Try to handle shared memory first. if (old_buffer->is_shared()) { - // Adjust protections for the buffer. - if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) { - return -1; - } - void* backing_store = old_buffer->backing_store(); - if (memory_tracker->IsWasmSharedMemory(backing_store)) { - // This memory is shared between different isolates. - DCHECK(old_buffer->is_shared()); - // Update the pending grow state, and trigger a grow interrupt on all the - // isolates that share this buffer. - memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size); - // Handle interrupts for this isolate so that the instances with this - // isolate are updated. - isolate->stack_guard()->HandleInterrupts(); - // Failure to allocate or to adjust permissions was already handled - // above, and updates to instances are handled in the interrupt handler, - // so it is safe to return. - return static_cast<int32_t>(old_size / wasm::kWasmPageSize); - } - // SharedArrayBuffer, but not shared across isolates. Set up a new buffer - // with updated permissions and update the instances. - new_buffer = - wasm::SetupArrayBuffer(isolate, backing_store, new_size, - old_buffer->is_external(), SharedFlag::kShared); - memory_object->update_instances(isolate, new_buffer); - } else { - if (!MemoryGrowBuffer(isolate, old_buffer, new_size) - .ToHandle(&new_buffer)) { - return -1; + if (FLAG_wasm_grow_shared_memory) { + // Shared memories can only be grown in place; no copying. + if (backing_store->GrowWasmMemoryInPlace(isolate, new_size)) { + BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store, + new_size); + // Broadcasting the update should update this memory object too. + DCHECK_NE(*old_buffer, memory_object->array_buffer()); + DCHECK_EQ(new_size, memory_object->array_buffer().byte_length()); + return static_cast<int32_t>(old_pages); // success + } } + return -1; } - } - // Update instances if any. + + // Try to grow non-shared memory in-place.
+ if (backing_store->GrowWasmMemoryInPlace(isolate, new_size)) { + // Detach old and create a new one with the grown backing store. + old_buffer->Detach(true); + Handle new_buffer = isolate->factory()->NewJSArrayBuffer(); + new_buffer->Attach(backing_store); + memory_object->update_instances(isolate, new_buffer); + return static_cast(old_pages); // success + } + // Try allocating a new backing store and copying. + std::unique_ptr new_backing_store = + BackingStore::CopyWasmMemory(isolate, backing_store, new_size); + if (!new_backing_store) return -1; + + // Detach old and create a new one with the new backing store. + old_buffer->Detach(true); + Handle new_buffer = isolate->factory()->NewJSArrayBuffer(); + new_buffer->Attach(std::move(new_backing_store)); memory_object->update_instances(isolate, new_buffer); - return static_cast(old_size / wasm::kWasmPageSize); + return static_cast(old_pages); // success } // static @@ -1492,18 +1446,15 @@ MaybeHandle WasmGlobalObject::New( global_obj->set_tagged_buffer(*tagged_buffer); } else { DCHECK(maybe_tagged_buffer.is_null()); - Handle untagged_buffer; uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type); - if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) { - // If no buffer was provided, create one long enough for the given type. - untagged_buffer = isolate->factory()->NewJSArrayBuffer( - SharedFlag::kNotShared, AllocationType::kOld); - const bool initialize = true; - if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate, - type_size, initialize)) { - return {}; - } + Handle untagged_buffer; + if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) { + MaybeHandle result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + offset + type_size, InitializedFlag::kZeroInitialized); + + if (!result.ToHandle(&untagged_buffer)) return {}; } // Check that the offset is in bounds. 
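The grow path above now has three outcomes: shared memories grow strictly in place and broadcast the new size, non-shared memories grow in place when the reservation allows it, and otherwise the contents are copied into a freshly allocated backing store. In every successful non-shared case the old JSArrayBuffer is detached and a new one is attached, since the spec assumes ArrayBuffers never change size. A usage-level sketch of the protocol, using only the signatures exercised by the tests later in this patch:

    // Allocate a 1-page memory that may grow to 4 pages.
    MaybeHandle<WasmMemoryObject> maybe_memory = WasmMemoryObject::New(
        isolate, /*initial=*/1, /*maximum=*/4, SharedFlag::kNotShared);
    Handle<WasmMemoryObject> memory = maybe_memory.ToHandleChecked();
    Handle<JSArrayBuffer> old_buffer(memory->array_buffer(), isolate);

    // Grow by one page; returns the previous size in pages, or -1 on failure.
    int32_t old_pages = WasmMemoryObject::Grow(isolate, memory, 1);
    CHECK_EQ(1, old_pages);
    // The old buffer is always detached; the memory object holds a new one.
    CHECK(old_buffer->was_detached());
    CHECK_NE(*old_buffer, memory->array_buffer());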
diff --git a/src/wasm/wasm-objects.h b/src/wasm/wasm-objects.h index cf76e44681..62bdc6dedf 100644 --- a/src/wasm/wasm-objects.h +++ b/src/wasm/wasm-objects.h @@ -46,6 +46,8 @@ class WasmJSFunction; class WasmModuleObject; class WasmIndirectFunctionTable; +enum class SharedFlag : uint8_t; + template class Managed; @@ -357,9 +359,10 @@ class WasmMemoryObject : public JSObject { V8_EXPORT_PRIVATE static Handle New( Isolate* isolate, MaybeHandle buffer, uint32_t maximum); - V8_EXPORT_PRIVATE static MaybeHandle New( - Isolate* isolate, uint32_t initial, uint32_t maximum, - bool is_shared_memory); + V8_EXPORT_PRIVATE static MaybeHandle New(Isolate* isolate, + uint32_t initial, + uint32_t maximum, + SharedFlag shared); void update_instances(Isolate* isolate, Handle buffer); diff --git a/test/cctest/BUILD.gn b/test/cctest/BUILD.gn index ac0d51faca..4b96882ecd 100644 --- a/test/cctest/BUILD.gn +++ b/test/cctest/BUILD.gn @@ -153,6 +153,7 @@ v8_source_set("cctest_sources") { "interpreter/test-source-positions.cc", "libplatform/test-tracing.cc", "libsampler/test-sampler.cc", + "manually-externalized-buffer.h", "parsing/test-parse-decision.cc", "parsing/test-preparser.cc", "parsing/test-scanner-streams.cc", @@ -177,6 +178,7 @@ v8_source_set("cctest_sources") { "test-api.h", "test-array-list.cc", "test-atomicops.cc", + "test-backing-store.cc", "test-bignum-dtoa.cc", "test-bignum.cc", "test-bit-vector.cc", @@ -258,6 +260,7 @@ v8_source_set("cctest_sources") { "unicode-helpers.cc", "unicode-helpers.h", "wasm/test-c-wasm-entry.cc", + "wasm/test-grow-memory.cc", "wasm/test-jump-table-assembler.cc", "wasm/test-run-wasm-64.cc", "wasm/test-run-wasm-asmjs.cc", diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status index 17d0096140..48e31e5d14 100644 --- a/test/cctest/cctest.status +++ b/test/cctest/cctest.status @@ -471,8 +471,10 @@ 'test-api/WasmI32AtomicWaitCallback': [SKIP], 'test-api/WasmI64AtomicWaitCallback': [SKIP], 'test-api/WasmStreaming*': [SKIP], + 'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP], 'test-c-wasm-entry/*': [SKIP], 'test-jump-table-assembler/*': [SKIP], + 'test-grow-memory/*': [SKIP], 'test-run-wasm-64/*': [SKIP], 'test-run-wasm-asmjs/*': [SKIP], 'test-run-wasm-atomics64/*': [SKIP], diff --git a/test/cctest/heap/test-array-buffer-tracker.cc b/test/cctest/heap/test-array-buffer-tracker.cc index b4122c9619..66354cab7f 100644 --- a/test/cctest/heap/test-array-buffer-tracker.cc +++ b/test/cctest/heap/test-array-buffer-tracker.cc @@ -193,8 +193,8 @@ TEST(ArrayBuffer_UnregisterDuringSweep) { // barriers and proper synchronization this will trigger a data race on // TSAN. v8::ArrayBuffer::Contents contents = ab->Externalize(); - heap->isolate()->array_buffer_allocator()->Free(contents.Data(), - contents.ByteLength()); + contents.Deleter()(contents.Data(), contents.ByteLength(), + contents.DeleterData()); } } diff --git a/test/cctest/heap/test-page-promotion.cc b/test/cctest/heap/test-page-promotion.cc index df6211826e..5d4a543719 100644 --- a/test/cctest/heap/test-page-promotion.cc +++ b/test/cctest/heap/test-page-promotion.cc @@ -146,8 +146,10 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) { heap::FillCurrentPage(heap->new_space()); // Allocate a buffer we would like to check against. 
Handle<JSArrayBuffer> buffer = - i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100)); + i_isolate->factory() + ->NewJSArrayBufferAndBackingStore(100, + InitializedFlag::kZeroInitialized) + .ToHandleChecked(); std::vector<Handle<FixedArray>> handles; // Simulate a full space, filling the interesting page with live objects. heap::SimulateFullSpace(heap->new_space(), &handles); @@ -188,8 +190,10 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) { heap::FillCurrentPage(heap->new_space()); // Allocate a buffer we would like to check against. Handle<JSArrayBuffer> buffer = - i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100)); + i_isolate->factory() + ->NewJSArrayBufferAndBackingStore(100, + InitializedFlag::kZeroInitialized) + .ToHandleChecked(); std::vector<Handle<FixedArray>> handles; // Simulate a full space, filling the interesting page with live objects. heap::SimulateFullSpace(heap->new_space(), &handles); diff --git a/test/cctest/manually-externalized-buffer.h b/test/cctest/manually-externalized-buffer.h new file mode 100644 index 0000000000..b5eeed7382 --- /dev/null +++ b/test/cctest/manually-externalized-buffer.h @@ -0,0 +1,34 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ +#define V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ + +#include "src/api/api-inl.h" + +namespace v8 { +namespace internal { +namespace testing { + +// Utility to free the allocated memory for a buffer that is manually +// externalized in a test. +struct ManuallyExternalizedBuffer { + Handle<JSArrayBuffer> buffer_; + v8::ArrayBuffer::Contents contents_; + + explicit ManuallyExternalizedBuffer(Handle<JSArrayBuffer> buffer) + : buffer_(buffer), + contents_(v8::Utils::ToLocal(buffer_)->Externalize()) {} + ~ManuallyExternalizedBuffer() { + contents_.Deleter()(contents_.Data(), contents_.ByteLength(), + contents_.DeleterData()); + } + void* backing_store() { return contents_.Data(); } +}; + +} // namespace testing +} // namespace internal +} // namespace v8 + +#endif // V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_ diff --git a/test/cctest/test-api-array-buffer.cc b/test/cctest/test-api-array-buffer.cc index 5b8433a6a2..0655292ff7 100644 --- a/test/cctest/test-api-array-buffer.cc +++ b/test/cctest/test-api-array-buffer.cc @@ -17,7 +17,10 @@ class ScopedArrayBufferContents { public: explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents) : contents_(contents) {} - ~ScopedArrayBufferContents() { free(contents_.AllocationBase()); } + ~ScopedArrayBufferContents() { + contents_.Deleter()(contents_.Data(), contents_.ByteLength(), + contents_.DeleterData()); + } void* Data() const { return contents_.Data(); } size_t ByteLength() const { return contents_.ByteLength(); } @@ -36,7 +39,10 @@ class ScopedSharedArrayBufferContents { explicit ScopedSharedArrayBufferContents( const v8::SharedArrayBuffer::Contents& contents) : contents_(contents) {} - ~ScopedSharedArrayBufferContents() { free(contents_.AllocationBase()); } + ~ScopedSharedArrayBufferContents() { + contents_.Deleter()(contents_.Data(), contents_.ByteLength(), + contents_.DeleterData()); + } void* Data() const { return contents_.Data(); } size_t ByteLength() const { return contents_.ByteLength(); } diff --git a/test/cctest/test-backing-store.cc b/test/cctest/test-backing-store.cc new file mode 100644 index
0000000000..f8010d3031 --- /dev/null +++ b/test/cctest/test-backing-store.cc @@ -0,0 +1,85 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/api/api-inl.h" +#include "src/objects/backing-store.h" +#include "src/wasm/wasm-objects.h" + +#include "test/cctest/cctest.h" +#include "test/cctest/manually-externalized-buffer.h" + +namespace v8 { +namespace internal { + +using testing::ManuallyExternalizedBuffer; + +TEST(Run_WasmModule_Buffer_Externalized_Detach) { + { + // Regression test for + // https://bugs.chromium.org/p/chromium/issues/detail?id=731046 + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + MaybeHandle<JSArrayBuffer> result = + isolate->factory()->NewJSArrayBufferAndBackingStore( + wasm::kWasmPageSize, InitializedFlag::kZeroInitialized); + Handle<JSArrayBuffer> buffer = result.ToHandleChecked(); + + // Embedder requests contents. + ManuallyExternalizedBuffer external(buffer); + + buffer->Detach(); + CHECK(buffer->was_detached()); + + // Make sure we can write to the buffer without crashing + uint32_t* int_buffer = + reinterpret_cast<uint32_t*>(external.backing_store()); + int_buffer[0] = 0; + // Embedder frees contents. + } + CcTest::CollectAllAvailableGarbage(); +} + +TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) { + { + // Regression test for https://crbug.com/813876 + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + MaybeHandle<WasmMemoryObject> result = + WasmMemoryObject::New(isolate, 1, 1, SharedFlag::kNotShared); + Handle<WasmMemoryObject> memory_object = result.ToHandleChecked(); + Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate); + + { + // Embedder requests contents. + ManuallyExternalizedBuffer external(buffer); + + // Growing (even by 0) detaches the old buffer. + WasmMemoryObject::Grow(isolate, memory_object, 0); + CHECK(buffer->was_detached()); + + // Embedder frees contents. + } + + // Make sure the memory object has a new buffer that can be written to. + uint32_t* int_buffer = reinterpret_cast<uint32_t*>( + memory_object->array_buffer().backing_store()); + int_buffer[0] = 0; + } + CcTest::CollectAllAvailableGarbage(); +} + +#if V8_TARGET_ARCH_64_BIT +TEST(BackingStore_Reclaim) { + // Make sure we can allocate memories without running out of address space.
+ Isolate* isolate = CcTest::InitIsolateOnce(); + for (int i = 0; i < 256; ++i) { + auto backing_store = + BackingStore::AllocateWasmMemory(isolate, 1, 1, SharedFlag::kNotShared); + CHECK(backing_store); + } +} +#endif + +} // namespace internal +} // namespace v8 diff --git a/test/cctest/test-code-stub-assembler.cc b/test/cctest/test-code-stub-assembler.cc index e7fc946675..af47e082f3 100644 --- a/test/cctest/test-code-stub-assembler.cc +++ b/test/cctest/test-code-stub-assembler.cc @@ -1559,8 +1559,8 @@ TEST(TryLookupElement) { v8::ArrayBuffer::Contents contents = buffer->Externalize(); buffer->Detach(); - isolate->array_buffer_allocator()->Free(contents.Data(), - contents.ByteLength()); + contents.Deleter()(contents.Data(), contents.ByteLength(), + contents.DeleterData()); CHECK_ABSENT(object, 0); CHECK_ABSENT(object, 1); diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc index e534670bb6..6104bf150b 100644 --- a/test/cctest/test-heap-profiler.cc +++ b/test/cctest/test-heap-profiler.cc @@ -2913,7 +2913,8 @@ TEST(ArrayBufferSharedBackingStore) { CHECK(ab2_data); CHECK_EQ(ab1_data, ab2_data); CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data)); - free(data); + ab_contents.Deleter()(ab_contents.Data(), ab_contents.ByteLength(), + ab_contents.DeleterData()); } diff --git a/test/cctest/test-roots.cc b/test/cctest/test-roots.cc index d041903639..22ef2ae66f 100644 --- a/test/cctest/test-roots.cc +++ b/test/cctest/test-roots.cc @@ -46,6 +46,7 @@ bool IsInitiallyMutable(Factory* factory, Address object_address) { V(detached_contexts) \ V(dirty_js_finalization_groups) \ V(feedback_vectors_for_profiling_tools) \ + V(shared_wasm_memories) \ V(materialized_objects) \ V(noscript_shared_function_infos) \ V(public_symbol_table) \ diff --git a/test/cctest/wasm/test-grow-memory.cc b/test/cctest/wasm/test-grow-memory.cc new file mode 100644 index 0000000000..a188707cae --- /dev/null +++ b/test/cctest/wasm/test-grow-memory.cc @@ -0,0 +1,131 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/wasm/wasm-objects-inl.h" +#include "src/wasm/wasm-opcodes.h" + +#include "src/wasm/wasm-module-builder.h" +#include "test/cctest/cctest.h" +#include "test/cctest/manually-externalized-buffer.h" +#include "test/common/wasm/flag-utils.h" +#include "test/common/wasm/test-signatures.h" +#include "test/common/wasm/wasm-macro-gen.h" +#include "test/common/wasm/wasm-module-runner.h" + +namespace v8 { +namespace internal { +namespace wasm { +namespace test_grow_memory { + +using testing::CompileAndInstantiateForTesting; +using v8::internal::testing::ManuallyExternalizedBuffer; + +namespace { +void ExportAsMain(WasmFunctionBuilder* f) { + f->builder()->AddExport(CStrVector("main"), f); +} +#define EMIT_CODE_WITH_END(f, code) \ do { \ f->EmitCode(code, sizeof(code)); \ f->Emit(kExprEnd); \ } while (false) + +void Cleanup(Isolate* isolate = CcTest::InitIsolateOnce()) { + // By sending a low memory notification, we will try hard to collect all + // garbage and will therefore also invoke all weak callbacks of actually + // unreachable persistent handles.
+ reinterpret_cast<v8::Isolate*>(isolate)->LowMemoryNotification(); +} +} // namespace + +TEST(GrowMemDetaches) { + { + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + Handle<WasmMemoryObject> memory_object = + WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared) + .ToHandleChecked(); + Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate); + int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0); + CHECK_EQ(16, result); + CHECK_NE(*buffer, memory_object->array_buffer()); + CHECK(buffer->was_detached()); + } + Cleanup(); +} + +TEST(Externalized_GrowMemMemSize) { + { + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + Handle<WasmMemoryObject> memory_object = + WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared) + .ToHandleChecked(); + ManuallyExternalizedBuffer external( + handle(memory_object->array_buffer(), isolate)); + int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0); + CHECK_EQ(16, result); + CHECK_NE(*external.buffer_, memory_object->array_buffer()); + CHECK(external.buffer_->was_detached()); + } + Cleanup(); +} + +TEST(Run_WasmModule_Buffer_Externalized_GrowMem) { + { + Isolate* isolate = CcTest::InitIsolateOnce(); + HandleScope scope(isolate); + TestSignatures sigs; + v8::internal::AccountingAllocator allocator; + Zone zone(&allocator, ZONE_NAME); + + WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone); + WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v()); + ExportAsMain(f); + byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP, + WASM_MEMORY_SIZE}; + EMIT_CODE_WITH_END(f, code); + + ZoneBuffer buffer(&zone); + builder->WriteTo(&buffer); + testing::SetupIsolateForWasmModule(isolate); + ErrorThrower thrower(isolate, "Test"); + const Handle<WasmInstanceObject> instance = + CompileAndInstantiateForTesting( + isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end())) + .ToHandleChecked(); + Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate); + + // Fake the Embedder flow by externalizing the array buffer. + ManuallyExternalizedBuffer external1( + handle(memory_object->array_buffer(), isolate)); + + // Grow using the API. + uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4); + CHECK_EQ(16, result); + CHECK(external1.buffer_->was_detached()); // growing always detaches + CHECK_EQ(0, external1.buffer_->byte_length()); + + CHECK_NE(*external1.buffer_, memory_object->array_buffer()); + + // Fake the Embedder flow by externalizing the array buffer. + ManuallyExternalizedBuffer external2( + handle(memory_object->array_buffer(), isolate)); + + // Grow using an internal WASM bytecode.
+ result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr); + CHECK_EQ(26, result); + CHECK(external2.buffer_->was_detached()); // growing always detaches + CHECK_EQ(0, external2.buffer_->byte_length()); + CHECK_NE(*external2.buffer_, memory_object->array_buffer()); + } + Cleanup(); +} + +} // namespace test_grow_memory +} // namespace wasm +} // namespace internal +} // namespace v8 + +#undef EMIT_CODE_WITH_END diff --git a/test/cctest/wasm/test-run-wasm-module.cc b/test/cctest/wasm/test-run-wasm-module.cc index 526c5846a2..ba1f98e040 100644 --- a/test/cctest/wasm/test-run-wasm-module.cc +++ b/test/cctest/wasm/test-run-wasm-module.cc @@ -11,7 +11,6 @@ #include "src/utils/version.h" #include "src/wasm/module-decoder.h" #include "src/wasm/wasm-engine.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-module-builder.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" @@ -941,154 +940,6 @@ TEST(MemoryWithOOBEmptyDataSegment) { Cleanup(); } -// Utility to free the allocated memory for a buffer that is manually -// externalized in a test. -struct ManuallyExternalizedBuffer { - Isolate* isolate_; - Handle<JSArrayBuffer> buffer_; - void* allocation_base_; - size_t allocation_length_; - bool const should_free_; - - ManuallyExternalizedBuffer(JSArrayBuffer buffer, Isolate* isolate) - : isolate_(isolate), - buffer_(buffer, isolate), - allocation_base_(buffer.allocation_base()), - allocation_length_(buffer.allocation_length()), - should_free_(!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory( - buffer.backing_store())) { - if (!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory( - buffer.backing_store())) { - v8::Utils::ToLocal(buffer_)->Externalize(); - } - } - ~ManuallyExternalizedBuffer() { - if (should_free_) { - buffer_->FreeBackingStoreFromMainThread(); - } - } -}; - -TEST(Run_WasmModule_Buffer_Externalized_GrowMem) { - { - Isolate* isolate = CcTest::InitIsolateOnce(); - HandleScope scope(isolate); - TestSignatures sigs; - v8::internal::AccountingAllocator allocator; - Zone zone(&allocator, ZONE_NAME); - - WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone); - WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v()); - ExportAsMain(f); - byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP, - WASM_MEMORY_SIZE}; - EMIT_CODE_WITH_END(f, code); - - ZoneBuffer buffer(&zone); - builder->WriteTo(&buffer); - testing::SetupIsolateForWasmModule(isolate); - ErrorThrower thrower(isolate, "Test"); - const Handle<WasmInstanceObject> instance = - CompileAndInstantiateForTesting( - isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end())) - .ToHandleChecked(); - Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate); - - // Fake the Embedder flow by externalizing the array buffer. - ManuallyExternalizedBuffer buffer1(memory_object->array_buffer(), isolate); - - // Grow using the API. - uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4); - CHECK_EQ(16, result); - CHECK(buffer1.buffer_->was_detached()); // growing always detaches - CHECK_EQ(0, buffer1.buffer_->byte_length()); - - CHECK_NE(*buffer1.buffer_, memory_object->array_buffer()); - - // Fake the Embedder flow by externalizing the array buffer. - ManuallyExternalizedBuffer buffer2(memory_object->array_buffer(), isolate); - - // Grow using an internal WASM bytecode.
- result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr); - CHECK_EQ(26, result); - CHECK(buffer2.buffer_->was_detached()); // growing always detaches - CHECK_EQ(0, buffer2.buffer_->byte_length()); - CHECK_NE(*buffer2.buffer_, memory_object->array_buffer()); - } - Cleanup(); -} - -TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) { - { - Isolate* isolate = CcTest::InitIsolateOnce(); - HandleScope scope(isolate); - Handle<JSArrayBuffer> buffer; - CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer)); - Handle<WasmMemoryObject> mem_obj = - WasmMemoryObject::New(isolate, buffer, 100); - auto const contents = v8::Utils::ToLocal(buffer)->Externalize(); - int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0); - CHECK_EQ(16, result); - constexpr bool is_wasm_memory = true; - const JSArrayBuffer::Allocation allocation{contents.AllocationBase(), - contents.AllocationLength(), - contents.Data(), is_wasm_memory}; - JSArrayBuffer::FreeBackingStore(isolate, allocation); - } - Cleanup(); -} - -TEST(Run_WasmModule_Buffer_Externalized_Detach) { - { - // Regression test for - // https://bugs.chromium.org/p/chromium/issues/detail?id=731046 - Isolate* isolate = CcTest::InitIsolateOnce(); - HandleScope scope(isolate); - Handle<JSArrayBuffer> buffer; - CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer)); - auto const contents = v8::Utils::ToLocal(buffer)->Externalize(); - wasm::DetachMemoryBuffer(isolate, buffer, true); - constexpr bool is_wasm_memory = true; - const JSArrayBuffer::Allocation allocation{contents.AllocationBase(), - contents.AllocationLength(), - contents.Data(), is_wasm_memory}; - JSArrayBuffer::FreeBackingStore(isolate, allocation); - } - Cleanup(); -} - -TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) { - // Regresion test for https://crbug.com/813876 - Isolate* isolate = CcTest::InitIsolateOnce(); - HandleScope scope(isolate); - Handle<JSArrayBuffer> buffer; - CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer)); - Handle<WasmMemoryObject> mem = WasmMemoryObject::New(isolate, buffer, 128); - auto contents = v8::Utils::ToLocal(buffer)->Externalize(); - WasmMemoryObject::Grow(isolate, mem, 0); - constexpr bool is_wasm_memory = true; - JSArrayBuffer::FreeBackingStore( - isolate, JSArrayBuffer::Allocation(contents.AllocationBase(), - contents.AllocationLength(), - contents.Data(), is_wasm_memory)); - // Make sure we can write to the buffer without crashing - uint32_t* int_buffer = - reinterpret_cast<uint32_t*>(mem->array_buffer().backing_store()); - int_buffer[0] = 0; -} - -#if V8_TARGET_ARCH_64_BIT -TEST(Run_WasmModule_Reclaim_Memory) { - // Make sure we can allocate memories without running out of address space.
- Isolate* isolate = CcTest::InitIsolateOnce(); - Handle<JSArrayBuffer> buffer; - for (int i = 0; i < 256; ++i) { - HandleScope scope(isolate); - CHECK(NewArrayBuffer(isolate, kWasmPageSize).ToHandle(&buffer)); - } -} -#endif - TEST(AtomicOpDisassembly) { { EXPERIMENTAL_FLAG_SCOPE(threads); diff --git a/test/cctest/wasm/test-wasm-serialization.cc b/test/cctest/wasm/test-wasm-serialization.cc index 1ff2a899ad..ad5d5b1382 100644 --- a/test/cctest/wasm/test-wasm-serialization.cc +++ b/test/cctest/wasm/test-wasm-serialization.cc @@ -11,7 +11,6 @@ #include "src/utils/version.h" #include "src/wasm/module-decoder.h" #include "src/wasm/wasm-engine.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-module-builder.h" #include "src/wasm/wasm-module.h" #include "src/wasm/wasm-objects-inl.h" diff --git a/test/cctest/wasm/wasm-run-utils.cc b/test/cctest/wasm/wasm-run-utils.cc index 6a17b81c56..b8388665db 100644 --- a/test/cctest/wasm/wasm-run-utils.cc +++ b/test/cctest/wasm/wasm-run-utils.cc @@ -10,7 +10,6 @@ #include "src/wasm/graph-builder-interface.h" #include "src/wasm/module-compiler.h" #include "src/wasm/wasm-import-wrapper-cache.h" -#include "src/wasm/wasm-memory.h" #include "src/wasm/wasm-objects-inl.h" namespace v8 { @@ -75,29 +74,23 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) { CHECK_NULL(mem_start_); CHECK_EQ(0, mem_size_); DCHECK(!instance_object_->has_memory_object()); - DCHECK_IMPLIES(test_module_->origin == kWasmOrigin, - size % kWasmPageSize == 0); + uint32_t initial_pages = RoundUp(size, kWasmPageSize) / kWasmPageSize; + uint32_t maximum_pages = (test_module_->maximum_pages != 0) + ? test_module_->maximum_pages + : initial_pages; test_module_->has_memory = true; - uint32_t max_size = - (test_module_->maximum_pages != 0) ? test_module_->maximum_pages : size; - uint32_t alloc_size = RoundUp(size, kWasmPageSize); - Handle<JSArrayBuffer> new_buffer; - if (shared == SharedFlag::kShared) { - CHECK(NewSharedArrayBuffer(isolate_, alloc_size, max_size) - .ToHandle(&new_buffer)); - } else { - CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer)); - } - CHECK(!new_buffer.is_null()); - mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store()); - mem_size_ = size; - CHECK(size == 0 || mem_start_); - memset(mem_start_, 0, size); // Create the WasmMemoryObject. Handle<WasmMemoryObject> memory_object = - WasmMemoryObject::New(isolate_, new_buffer, max_size); + WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared) + .ToHandleChecked(); instance_object_->set_memory_object(*memory_object); + + mem_start_ = + reinterpret_cast<byte*>(memory_object->array_buffer().backing_store()); + mem_size_ = size; + CHECK(size == 0 || mem_start_); + WasmMemoryObject::AddInstance(isolate_, memory_object, instance_object_); // TODO(wasm): Delete the following two lines when test-run-wasm will use a // multiple of kPageSize as memory size. At the moment, the effect of these diff --git a/test/mjsunit/wasm/gc-memory.js b/test/mjsunit/wasm/gc-memory.js new file mode 100644 index 0000000000..31e96f8be3 --- /dev/null +++ b/test/mjsunit/wasm/gc-memory.js @@ -0,0 +1,41 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +let kPageSize = 65536; + +function allocMems(count, initial, maximum) { + print(`alloc ${count}`); + let result = []; + for (let i = 0; i < count; i++) { + print(` memory #${i} (initial=${initial}, maximum=${maximum})...`); + result.push(new WebAssembly.Memory({initial: initial, maximum: maximum})); + } + return result; +} + +function check(mems, initial) { + for (let m of mems) { + assertEquals(initial * kPageSize, m.buffer.byteLength); + } +} + +function test(count, initial, maximum) { + let mems = allocMems(count, initial, maximum); + check(mems, initial); +} + +test(1, 1, 1); +test(1, 1, 2); +test(1, 1, 3); +test(1, 1, 4); + +test(2, 1, 1); +test(2, 1, 2); +test(2, 1, 3); +test(2, 1, 4); + +test(1, 1, undefined); +test(2, 1, undefined); +test(3, 1, undefined); +test(4, 1, undefined); diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc index 0d7bb946e3..0616a9ae2a 100644 --- a/test/unittests/compiler/js-typed-lowering-unittest.cc +++ b/test/unittests/compiler/js-typed-lowering-unittest.cc @@ -52,13 +52,6 @@ class JSTypedLoweringTest : public TypedGraphTest { return reducer.Reduce(node); } - Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) { - Handle<JSArrayBuffer> buffer = - factory()->NewJSArrayBuffer(SharedFlag::kNotShared); - JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length); - return buffer; - } - JSOperatorBuilder* javascript() { return &javascript_; } private: diff --git a/test/unittests/objects/value-serializer-unittest.cc b/test/unittests/objects/value-serializer-unittest.cc index a3a6fb22a7..e7ddf3138a 100644 --- a/test/unittests/objects/value-serializer-unittest.cc +++ b/test/unittests/objects/value-serializer-unittest.cc @@ -10,6 +10,7 @@ #include "include/v8.h" #include "src/api/api-inl.h" #include "src/base/build_config.h" +#include "src/objects/backing-store.h" #include "src/objects/objects-inl.h" #include "src/wasm/wasm-objects.h" #include "test/unittests/test-utils.h" @@ -1987,23 +1988,44 @@ class ValueSerializerTestWithSharedArrayBufferClone ValueSerializerTestWithSharedArrayBufferClone() : serializer_delegate_(this), deserializer_delegate_(this) {} - void InitializeData(const std::vector<uint8_t>& data) { + void InitializeData(const std::vector<uint8_t>& data, bool is_wasm_memory) { data_ = data; { Context::Scope scope(serialization_context()); input_buffer_ = - SharedArrayBuffer::New(isolate(), data_.data(), data_.size()); + NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory); } { Context::Scope scope(deserialization_context()); output_buffer_ = - SharedArrayBuffer::New(isolate(), data_.data(), data_.size()); + NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory); } } const Local<SharedArrayBuffer>& input_buffer() { return input_buffer_; } const Local<SharedArrayBuffer>& output_buffer() { return output_buffer_; } + Local<SharedArrayBuffer> NewSharedArrayBuffer(void* data, size_t byte_length, + bool is_wasm_memory) { + if (is_wasm_memory) { + // TODO(titzer): there is no way to create Wasm memory backing stores + // through the API, or to create a shared array buffer whose backing + // store is wasm memory, so use the internal API.
+ DCHECK_EQ(0, byte_length % i::wasm::kWasmPageSize); + auto pages = byte_length / i::wasm::kWasmPageSize; + auto i_isolate = reinterpret_cast<i::Isolate*>(isolate()); + auto backing_store = i::BackingStore::AllocateWasmMemory( + i_isolate, pages, pages, i::SharedFlag::kShared); + memcpy(backing_store->buffer_start(), data, byte_length); + i::Handle<i::JSArrayBuffer> buffer = + i_isolate->factory()->NewJSSharedArrayBuffer(); + buffer->Attach(std::move(backing_store)); + return Utils::ToLocalShared(buffer); + } else { + return SharedArrayBuffer::New(isolate(), data, byte_length); + } + } + static void SetUpTestCase() { flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer; i::FLAG_harmony_sharedarraybuffer = true; @@ -2075,7 +2097,7 @@ bool ValueSerializerTestWithSharedArrayBufferClone::flag_was_enabled_ = false; TEST_F(ValueSerializerTestWithSharedArrayBufferClone, RoundTripSharedArrayBufferClone) { - InitializeData({0x00, 0x01, 0x80, 0xFF}); + InitializeData({0x00, 0x01, 0x80, 0xFF}, false); EXPECT_CALL(serializer_delegate_, GetSharedArrayBufferId(isolate(), input_buffer())) @@ -2114,7 +2136,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone, std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF}; data.resize(65536); - InitializeData(data); + InitializeData(data, true); EXPECT_CALL(serializer_delegate_, GetSharedArrayBufferId(isolate(), input_buffer())) diff --git a/test/unittests/wasm/trap-handler-x64-unittest.cc b/test/unittests/wasm/trap-handler-x64-unittest.cc index 1659370999..e9bd5bd3c6 100644 --- a/test/unittests/wasm/trap-handler-x64-unittest.cc +++ b/test/unittests/wasm/trap-handler-x64-unittest.cc @@ -25,11 +25,11 @@ #include "src/codegen/assembler-inl.h" #include "src/codegen/macro-assembler-inl.h" #include "src/execution/simulator.h" +#include "src/objects/backing-store.h" #include "src/trap-handler/trap-handler.h" #include "src/utils/allocation.h" #include "src/utils/vector.h" #include "src/wasm/wasm-engine.h" -#include "src/wasm/wasm-memory.h" #include "test/common/assembler-tester.h" #include "test/unittests/test-utils.h" @@ -80,19 +80,13 @@ class TrapHandlerTest : public TestWithIsolate, public ::testing::WithParamInterface<TrapHandlerStyle> { protected: void SetUp() override { - void* base = nullptr; - size_t length = 0; - accessible_memory_start_ = - i_isolate() - ->wasm_engine() - ->memory_tracker() - ->TryAllocateBackingStoreForTesting( - i_isolate()->heap(), 1 * kWasmPageSize, &base, &length); - memory_buffer_ = - base::AddressRegion(reinterpret_cast<Address>
(base), length); - - // The allocated memory buffer ends with a guard page. - crash_address_ = memory_buffer_.end() - 32; + backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1, + SharedFlag::kNotShared); + CHECK(backing_store_); + CHECK(backing_store_->has_guard_regions()); + // The allocated backing store ends with a guard page. + crash_address_ = reinterpret_cast<Address>
(backing_store_->buffer_start()) + + backing_store_->byte_length() + 32; // Allocate a buffer for the generated code. buffer_ = AllocateAssemblerBuffer(AssemblerBase::kMinimalBufferSize, GetRandomMmapAddr()); @@ -122,10 +116,7 @@ class TrapHandlerTest : public TestWithIsolate, CHECK(!GetThreadInWasmFlag()); buffer_.reset(); recovery_buffer_.reset(); - - // Free the allocated backing store. - i_isolate()->wasm_engine()->memory_tracker()->FreeBackingStoreForTesting( - memory_buffer_, accessible_memory_start_); + backing_store_.reset(); // Clean up the trap handler trap_handler::RemoveTrapHandler(); @@ -252,14 +243,12 @@ class TrapHandlerTest : public TestWithIsolate, bool test_handler_executed() { return g_test_handler_executed; } - // Allocated memory which corresponds to wasm memory with guard regions. - base::AddressRegion memory_buffer_; + // The backing store used for testing the trap handler. + std::unique_ptr<BackingStore> backing_store_; + // Address within the guard region of the wasm memory. Accessing this memory // address causes a signal or exception. Address crash_address_; - // The start of the accessible region in the allocated memory. This pointer is - // needed to de-register the memory from the wasm memory tracker again. - void* accessible_memory_start_; // Buffer for generated code. std::unique_ptr<TestingAssemblerBuffer> buffer_; diff --git a/test/unittests/wasm/wasm-code-manager-unittest.cc b/test/unittests/wasm/wasm-code-manager-unittest.cc index eea1f8208d..bd5abb23cb 100644 --- a/test/unittests/wasm/wasm-code-manager-unittest.cc +++ b/test/unittests/wasm/wasm-code-manager-unittest.cc @@ -9,7 +9,6 @@ #include "src/wasm/jump-table-assembler.h" #include "src/wasm/wasm-code-manager.h" #include "src/wasm/wasm-engine.h" -#include "src/wasm/wasm-memory.h" namespace v8 { namespace internal {
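Note on the test edits above: a recurring change replaces hard-coded free() and Allocator::Free() calls with the deleter carried by ArrayBuffer::Contents, so the embedder no longer has to know how a backing store was allocated (malloc, allocator, or guarded wasm pages). The sketch below models that contract in isolation; Contents, DeleterCallback, and AllocateWithMalloc are simplified stand-ins for illustration, not the V8 embedder API itself.

    // Minimal model of a contents struct that carries its own deleter,
    // so callers release memory correctly regardless of allocation scheme.
    #include <cstddef>
    #include <cstdlib>

    using DeleterCallback = void (*)(void* data, size_t length, void* info);

    struct Contents {
      void* data;
      size_t byte_length;
      DeleterCallback deleter;
      void* deleter_data;  // opaque state for the deleter (e.g. an allocator)
    };

    // One possible allocation scheme: plain malloc, paired with a deleter
    // that knows to call free(). A page-allocated wasm store would install
    // a different deleter, and callers would not need to change.
    Contents AllocateWithMalloc(size_t length) {
      DeleterCallback deleter = [](void* data, size_t, void*) {
        std::free(data);
      };
      return Contents{std::malloc(length), length, deleter, nullptr};
    }

    int main() {
      Contents c = AllocateWithMalloc(64);
      // The caller never calls free() directly; it always goes through the
      // deleter, mirroring contents.Deleter()(Data(), ByteLength(),
      // DeleterData()) in the tests above.
      c.deleter(c.data, c.byte_length, c.deleter_data);
    }

This is why the updated tests can drop their IsWasmMemory() checks: the right release path is bound to the contents at allocation time instead of being rediscovered at free time.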