Revert "[arraybuffer] Rearchitect backing store ownership"

This reverts commit 31cd5d83d3.

Reason for revert: It breaks my heart to revert this, but it fails differently on several bots, e.g. https://ci.chromium.org/p/v8/builders/ci/V8%20Linux%20-%20debug/26671.

Original change's description:
> [arraybuffer] Rearchitect backing store ownership
> 
> This CL completely rearchitects the ownership of array buffer backing stores,
> consolidating ownership into a {BackingStore} C++ object that is tracked
> throughout V8 using unique_ptr and shared_ptr where appropriate.
> 
> Overall, lifetime management is simpler and more explicit. The numerous
> ways that array buffers were initialized have been streamlined to one
> Attach() method on JSArrayBuffer. The array buffer tracker in the
> GC implementation now manages std::shared_ptr<BackingStore> pointers,
> and the construction and destruction of the BackingStore object itself
> handles the underlying page or embedder-allocated memory.
> 
> The embedder API remains unchanged for now. We use the
> v8::ArrayBuffer::Contents struct to hide an additional shared_ptr to
> keep the backing store alive properly, even in the case of aliases
> from live heap objects. Thus the embedder has a lower chance of making
> a mistake. Long-term, we should move the embedder to a model where they
> manage backing stores using shared_ptr to an opaque backing store object.
> 
> R=mlippautz@chromium.org
> BUG=v8:9380,v8:9221
> 
> Change-Id: I48fae5ac85dcf6172a83f252439e77e7c1a16ccd
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1584323
> Commit-Queue: Ben Titzer <titzer@chromium.org>
> Reviewed-by: Ben Titzer <titzer@chromium.org>
> Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
> Reviewed-by: Yang Guo <yangguo@chromium.org>
> Reviewed-by: Deepti Gandluri <gdeepti@chromium.org>
> Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62572}

TBR=ulan@chromium.org,yangguo@chromium.org,mstarzinger@chromium.org,titzer@chromium.org,gdeepti@chromium.org,mlippautz@chromium.org

Change-Id: Ib35788ba8c31192d90cbc72df3dbc41030f109de
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:9380, v8:9221
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1691034
Reviewed-by: Clemens Hammacher <clemensh@chromium.org>
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62578}
Clemens Hammacher 2019-07-08 17:20:38 +00:00 committed by Commit Bot
parent 6ad6dd1a7a
commit bf92fbf4c8
61 changed files with 1886 additions and 2049 deletions
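To make the ownership model described in the reverted CL concrete, here is a minimal standalone sketch. The names BackingStore, JSArrayBuffer, and Attach() mirror the CL's description, but the classes are simplified hypothetical stand-ins, not V8's actual internals:

// Editor's sketch: hypothetical stand-ins, not V8's real classes.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <memory>

class BackingStore {
 public:
  // Allocation hands out unique ownership of the underlying memory.
  static std::unique_ptr<BackingStore> Allocate(size_t byte_length) {
    void* mem = std::calloc(1, byte_length);  // zero-initialized
    if (mem == nullptr) return nullptr;
    return std::unique_ptr<BackingStore>(new BackingStore(mem, byte_length));
  }
  // Destruction of the last owner frees the memory exactly once.
  ~BackingStore() { std::free(buffer_start_); }
  void* buffer_start() const { return buffer_start_; }
  size_t byte_length() const { return byte_length_; }

 private:
  BackingStore(void* start, size_t length)
      : buffer_start_(start), byte_length_(length) {}
  void* buffer_start_;
  size_t byte_length_;
};

class JSArrayBuffer {
 public:
  // The single Attach() point: unique ownership becomes shared ownership,
  // so the GC tracker and externalized contents can each hold a reference.
  void Attach(std::shared_ptr<BackingStore> backing_store) {
    backing_store_ = std::move(backing_store);
  }
  std::shared_ptr<BackingStore> GetBackingStore() const {
    return backing_store_;
  }

 private:
  std::shared_ptr<BackingStore> backing_store_;
};

int main() {
  JSArrayBuffer buffer;
  buffer.Attach(BackingStore::Allocate(1024));
  // An extra shared_ptr, like the one hidden inside ArrayBuffer::Contents,
  // keeps the memory alive independently of the buffer object.
  std::shared_ptr<BackingStore> alias = buffer.GetBackingStore();
  std::printf("mem=%p (%zu bytes)\n", alias->buffer_start(),
              alias->byte_length());
}  // Freed once, when the last shared_ptr dies.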

View File

@@ -2415,8 +2415,6 @@ v8_source_set("v8_base_without_compiler") {
"src/objects/api-callbacks.h",
"src/objects/arguments-inl.h",
"src/objects/arguments.h",
"src/objects/backing-store.cc",
"src/objects/backing-store.h",
"src/objects/bigint.cc",
"src/objects/bigint.h",
"src/objects/cell-inl.h",
@@ -2910,6 +2908,8 @@ v8_source_set("v8_base_without_compiler") {
"src/wasm/wasm-js.h",
"src/wasm/wasm-limits.h",
"src/wasm/wasm-linkage.h",
"src/wasm/wasm-memory.cc",
"src/wasm/wasm-memory.h",
"src/wasm/wasm-module-builder.cc",
"src/wasm/wasm-module-builder.h",
"src/wasm/wasm-module.cc",

View File

@@ -4782,7 +4782,7 @@ class V8_EXPORT ArrayBuffer : public Object {
*
* The Data pointer of ArrayBuffer::Contents must be freed using the provided
* deleter, which will call ArrayBuffer::Allocator::Free if the buffer
* was allocated with ArrayBuffer::Allocator::Allocate.
*/
Contents Externalize();
@@ -4804,7 +4804,6 @@ class V8_EXPORT ArrayBuffer : public Object {
private:
ArrayBuffer();
static void CheckCast(Value* obj);
Contents GetContents(bool externalize);
};
@@ -5222,7 +5221,6 @@ class V8_EXPORT SharedArrayBuffer : public Object {
private:
SharedArrayBuffer();
static void CheckCast(Value* obj);
Contents GetContents(bool externalize);
};
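The hunk above documents the Contents deleter contract: the embedder never assumes how the buffer was allocated and frees the Data pointer through the callback handed out with the Contents. A minimal sketch of that contract follows, with hypothetical types rather than the real v8.h declarations:

// Editor's sketch of the Contents deleter contract (hypothetical types,
// not the actual v8.h API).
#include <cstddef>
#include <cstdio>
#include <cstdlib>

using DeleterCallback = void (*)(void* data, size_t length,
                                 void* deleter_data);

struct Contents {
  void* data;
  size_t byte_length;
  DeleterCallback deleter;
  void* deleter_data;
};

// Stand-in for a deleter that routes back to the original allocator.
void MallocDeleter(void* data, size_t /*length*/, void* /*deleter_data*/) {
  std::free(data);
}

int main() {
  Contents contents{std::malloc(64), 64, MallocDeleter, nullptr};
  std::printf("freeing %zu bytes via the provided deleter\n",
              contents.byte_length);
  // Free through the callback instead of guessing the allocation method
  // (Allocator::Free, page allocator, ...).
  contents.deleter(contents.data, contents.byte_length,
                   contents.deleter_data);
}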

View File

@@ -126,8 +126,6 @@
#endif // V8_TARGET_ARCH_X64
#endif // V8_OS_WIN
#define TRACE_BS(...) /* redefine for tracing backing store operations */
namespace v8 {
/*
@@ -7160,75 +7158,20 @@ bool v8::ArrayBuffer::IsDetachable() const {
return Utils::OpenHandle(this)->is_detachable();
}
namespace {
// The backing store deleter just deletes the indirection, which downrefs
// the shared pointer. It will get collected normally.
void BackingStoreDeleter(void* buffer, size_t length, void* info) {
auto bs_indirection =
reinterpret_cast<std::shared_ptr<i::BackingStore>*>(info);
if (bs_indirection) {
auto backing_store = bs_indirection->get();
TRACE_BS("API:delete bs=%p mem=%p (%zu bytes)\n", backing_store,
backing_store->buffer_start(), backing_store->byte_length());
USE(backing_store);
}
delete bs_indirection;
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
const v8::ArrayBuffer::Contents contents = GetContents();
isolate->heap()->UnregisterArrayBuffer(*self);
// A regular copy is good enough. No move semantics needed.
return contents;
}
void* MakeDeleterData(std::shared_ptr<i::BackingStore> backing_store) {
if (!backing_store) return nullptr;
TRACE_BS("API:extern bs=%p mem=%p (%zu bytes)\n", backing_store.get(),
backing_store->buffer_start(), backing_store->byte_length());
return new std::shared_ptr<i::BackingStore>(backing_store);
}
std::shared_ptr<i::BackingStore> LookupOrCreateBackingStore(
i::Isolate* i_isolate, void* data, size_t byte_length, i::SharedFlag shared,
ArrayBufferCreationMode mode) {
// "internalized" means that the storage was allocated by the
// ArrayBufferAllocator and thus should be freed upon destruction.
bool free_on_destruct = mode == ArrayBufferCreationMode::kInternalized;
// Try to lookup a previously-registered backing store in the global
// registry. If found, use that instead of wrapping an embedder allocation.
std::shared_ptr<i::BackingStore> backing_store =
i::GlobalBackingStoreRegistry::Lookup(data, byte_length);
if (backing_store) {
// Check invariants for a previously-found backing store.
// 1. We cannot allow an embedder to first allocate a backing store that
// should not be freed upon destruct, and then allocate an alias that should
// destruct it. The other order is fine.
bool changing_destruct_mode =
free_on_destruct && !backing_store->free_on_destruct();
Utils::ApiCheck(
!changing_destruct_mode, "v8_[Shared]ArrayBuffer_New",
"previous backing store found that should not be freed on destruct");
// 2. We cannot allow embedders to use the same backing store for both
// SharedArrayBuffers and regular ArrayBuffers.
bool changing_shared_flag =
(shared == i::SharedFlag::kShared) != backing_store->is_shared();
Utils::ApiCheck(
!changing_shared_flag, "v8_[Shared]ArrayBuffer_New",
"previous backing store found that does not match shared flag");
} else {
// No previous backing store found.
backing_store = i::BackingStore::WrapAllocation(
i_isolate, data, byte_length, shared, free_on_destruct);
if (free_on_destruct) {
// The embedder requested free-on-destruct. They already have a
// direct pointer to the buffer start, so globally register the backing
// store in case they come back with the same buffer start.
i::GlobalBackingStoreRegistry::Register(backing_store);
}
}
return backing_store;
}
} // namespace
v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
void* allocation_base,
size_t allocation_length,
@@ -7245,61 +7188,29 @@ v8::ArrayBuffer::Contents::Contents(void* data, size_t byte_length,
DCHECK_LE(byte_length_, allocation_length_);
}
v8::ArrayBuffer::Contents v8::ArrayBuffer::Externalize() {
return GetContents(true);
void WasmMemoryDeleter(void* buffer, size_t length, void* info) {
internal::wasm::WasmEngine* engine =
reinterpret_cast<internal::wasm::WasmEngine*>(info);
CHECK(engine->memory_tracker()->FreeWasmMemory(nullptr, buffer));
}
void ArrayBufferDeleter(void* buffer, size_t length, void* info) {
v8::ArrayBuffer::Allocator* allocator =
reinterpret_cast<v8::ArrayBuffer::Allocator*>(info);
allocator->Free(buffer, length);
}
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents() {
return GetContents(false);
}
v8::ArrayBuffer::Contents v8::ArrayBuffer::GetContents(bool externalize) {
// TODO(titzer): reduce duplication between shared/unshared GetContents()
using BufferType = v8::ArrayBuffer;
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
void* deleter_data = nullptr;
if (externalize) {
Utils::ApiCheck(!self->is_external(), "v8_ArrayBuffer_Externalize",
"ArrayBuffer already externalized");
self->set_is_external(true);
// When externalizing, upref the shared pointer to the backing store
// and store that as the deleter data. When the embedder calls the deleter
// callback, we will delete the additional (on-heap) shared_ptr.
deleter_data = MakeDeleterData(backing_store);
}
if (!backing_store) {
// If the array buffer has zero length or was detached, return empty
// contents.
DCHECK_EQ(0, self->byte_length());
BufferType::Contents contents(
nullptr, 0, nullptr, 0,
v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
BackingStoreDeleter, deleter_data);
return contents;
}
// Backing stores that are given to the embedder might be passed back through
// the API using only the start of the buffer. We need to find such
// backing stores using global registration until the API is changed.
i::GlobalBackingStoreRegistry::Register(backing_store);
auto allocation_mode =
backing_store->is_wasm_memory()
? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
: v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
BufferType::Contents contents(backing_store->buffer_start(), // --
backing_store->byte_length(), // --
backing_store->buffer_start(), // --
backing_store->byte_length(), // --
allocation_mode, // --
BackingStoreDeleter, // --
deleter_data);
Contents contents(
self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory() ? Allocator::AllocationMode::kReservation
: Allocator::AllocationMode::kNormal,
self->is_wasm_memory() ? WasmMemoryDeleter : ArrayBufferDeleter,
self->is_wasm_memory()
? static_cast<void*>(self->GetIsolate()->wasm_engine())
: static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
return contents;
}
@@ -7324,18 +7235,14 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
i::MaybeHandle<i::JSArrayBuffer> result =
i_isolate->factory()->NewJSArrayBufferAndBackingStore(
byte_length, i::InitializedFlag::kZeroInitialized);
i::Handle<i::JSArrayBuffer> array_buffer;
if (!result.ToHandle(&array_buffer)) {
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
// version that throws an exception or otherwise does not crash.
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
// version that throws an exception or otherwise does not crash.
if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length)) {
i::FatalProcessOutOfMemory(i_isolate, "v8::ArrayBuffer::New");
}
return Utils::ToLocal(array_buffer);
return Utils::ToLocal(obj);
}
Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
@@ -7347,15 +7254,11 @@ Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, ArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
i_isolate, data, byte_length, i::SharedFlag::kNotShared, mode);
i::Handle<i::JSArrayBuffer> obj = i_isolate->factory()->NewJSArrayBuffer();
obj->Attach(std::move(backing_store));
if (mode == ArrayBufferCreationMode::kExternalized) {
obj->set_is_external(true);
}
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kNotShared);
i::JSArrayBuffer::Setup(obj, i_isolate,
mode == ArrayBufferCreationMode::kExternalized, data,
byte_length);
return Utils::ToLocal(obj);
}
@@ -7398,9 +7301,9 @@ size_t v8::ArrayBufferView::CopyContents(void* dest, size_t byte_length) {
bool v8::ArrayBufferView::HasBuffer() const {
i::Handle<i::JSArrayBufferView> self = Utils::OpenHandle(this);
if (!self->IsJSTypedArray()) return true;
auto typed_array = i::Handle<i::JSTypedArray>::cast(self);
return !typed_array->is_on_heap();
i::Handle<i::JSArrayBuffer> buffer(i::JSArrayBuffer::cast(self->buffer()),
self->GetIsolate());
return buffer->backing_store() != nullptr;
}
size_t v8::ArrayBufferView::ByteOffset() {
@@ -7496,17 +7399,13 @@ i::Handle<i::JSArrayBuffer> SetupSharedArrayBuffer(
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::shared_ptr<i::BackingStore> backing_store = LookupOrCreateBackingStore(
i_isolate, data, byte_length, i::SharedFlag::kShared, mode);
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSSharedArrayBuffer();
obj->Attach(backing_store);
if (mode == ArrayBufferCreationMode::kExternalized) {
obj->set_is_external(true);
}
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
bool is_wasm_memory =
i_isolate->wasm_engine()->memory_tracker()->IsWasmMemory(data);
i::JSArrayBuffer::Setup(obj, i_isolate,
mode == ArrayBufferCreationMode::kExternalized, data,
byte_length, i::SharedFlag::kShared, is_wasm_memory);
return obj;
}
@@ -7516,6 +7415,20 @@ bool v8::SharedArrayBuffer::IsExternal() const {
return Utils::OpenHandle(this)->is_external();
}
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
i::Isolate* isolate = self->GetIsolate();
Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
const v8::SharedArrayBuffer::Contents contents = GetContents();
isolate->heap()->UnregisterArrayBuffer(*self);
// A regular copy is good enough. No move semantics needed.
return contents;
}
v8::SharedArrayBuffer::Contents::Contents(
void* data, size_t byte_length, void* allocation_base,
size_t allocation_length, Allocator::AllocationMode allocation_mode,
@@ -7531,62 +7444,20 @@ v8::SharedArrayBuffer::Contents::Contents(
DCHECK_LE(byte_length_, allocation_length_);
}
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::Externalize() {
return GetContents(true);
}
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents() {
return GetContents(false);
}
v8::SharedArrayBuffer::Contents v8::SharedArrayBuffer::GetContents(
bool externalize) {
// TODO(titzer): reduce duplication between shared/unshared GetContents()
using BufferType = v8::SharedArrayBuffer;
i::Handle<i::JSArrayBuffer> self = Utils::OpenHandle(this);
std::shared_ptr<i::BackingStore> backing_store = self->GetBackingStore();
void* deleter_data = nullptr;
if (externalize) {
Utils::ApiCheck(!self->is_external(), "v8_SharedArrayBuffer_Externalize",
"SharedArrayBuffer already externalized");
self->set_is_external(true);
// When externalizing, upref the shared pointer to the backing store
// and store that as the deleter data. When the embedder calls the deleter
// callback, we will delete the additional (on-heap) shared_ptr.
deleter_data = MakeDeleterData(backing_store);
}
if (!backing_store) {
// If the array buffer has zero length or was detached, return empty
// contents.
DCHECK_EQ(0, self->byte_length());
BufferType::Contents contents(
nullptr, 0, nullptr, 0,
v8::ArrayBuffer::Allocator::AllocationMode::kNormal,
BackingStoreDeleter, deleter_data);
return contents;
}
// Backing stores that are given to the embedder might be passed back through
// the API using only the start of the buffer. We need to find such
// backing stores using global registration until the API is changed.
i::GlobalBackingStoreRegistry::Register(backing_store);
auto allocation_mode =
backing_store->is_wasm_memory()
? v8::ArrayBuffer::Allocator::AllocationMode::kReservation
: v8::ArrayBuffer::Allocator::AllocationMode::kNormal;
BufferType::Contents contents(backing_store->buffer_start(), // --
backing_store->byte_length(), // --
backing_store->buffer_start(), // --
backing_store->byte_length(), // --
allocation_mode, // --
BackingStoreDeleter, // --
deleter_data);
Contents contents(
self->backing_store(), self->byte_length(), self->allocation_base(),
self->allocation_length(),
self->is_wasm_memory()
? ArrayBuffer::Allocator::AllocationMode::kReservation
: ArrayBuffer::Allocator::AllocationMode::kNormal,
self->is_wasm_memory()
? reinterpret_cast<Contents::DeleterCallback>(WasmMemoryDeleter)
: reinterpret_cast<Contents::DeleterCallback>(ArrayBufferDeleter),
self->is_wasm_memory()
? static_cast<void*>(self->GetIsolate()->wasm_engine())
: static_cast<void*>(self->GetIsolate()->array_buffer_allocator()));
return contents;
}
@@ -7601,20 +7472,14 @@ Local<SharedArrayBuffer> v8::SharedArrayBuffer::New(Isolate* isolate,
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
LOG_API(i_isolate, SharedArrayBuffer, New);
ENTER_V8_NO_SCRIPT_NO_EXCEPTION(i_isolate);
std::unique_ptr<i::BackingStore> backing_store =
i::BackingStore::Allocate(i_isolate, byte_length, i::SharedFlag::kShared,
i::InitializedFlag::kZeroInitialized);
if (!backing_store) {
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
// version that throws an exception or otherwise does not crash.
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSArrayBuffer(i::SharedFlag::kShared);
// TODO(jbroman): It may be useful in the future to provide a MaybeLocal
// version that throws an exception or otherwise does not crash.
if (!i::JSArrayBuffer::SetupAllocatingData(obj, i_isolate, byte_length, true,
i::SharedFlag::kShared)) {
i::FatalProcessOutOfMemory(i_isolate, "v8::SharedArrayBuffer::New");
}
i::Handle<i::JSArrayBuffer> obj =
i_isolate->factory()->NewJSSharedArrayBuffer();
obj->Attach(std::move(backing_store));
return Utils::ToLocalShared(obj);
}
@@ -10575,5 +10440,3 @@ void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
} // namespace internal
} // namespace v8
#undef TRACE_BS
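The BackingStoreDeleter/MakeDeleterData pair removed above follows a common C++ pattern: parking an extra shared_ptr on the heap behind an opaque void* so a C-style callback can hold a strong reference. A minimal standalone sketch, with stand-in types rather than V8 internals:

// Editor's sketch of the shared_ptr indirection used by MakeDeleterData
// and BackingStoreDeleter above (stand-ins, not V8 internals).
#include <cstdio>
#include <memory>

struct BackingStore {
  ~BackingStore() { std::puts("backing store freed"); }
};

// "Upref": park an extra shared_ptr on the heap and hand it out as an
// opaque void*, so a C-style callback can keep the store alive.
void* MakeDeleterData(std::shared_ptr<BackingStore> bs) {
  if (!bs) return nullptr;
  return new std::shared_ptr<BackingStore>(std::move(bs));
}

// The deleter only deletes the indirection; the store itself dies when
// the last reference is dropped.
void BackingStoreDeleter(void* info) {
  delete static_cast<std::shared_ptr<BackingStore>*>(info);
}

int main() {
  auto bs = std::make_shared<BackingStore>();
  void* info = MakeDeleterData(bs);
  bs.reset();                 // the direct handle goes away first...
  BackingStoreDeleter(info);  // ...the callback drops the last reference.
}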

View File

@@ -387,12 +387,7 @@ MaybeHandle<Object> AsmJs::InstantiateAsmWasm(Isolate* isolate,
ReportInstantiationFailure(script, position, "Requires heap buffer");
return MaybeHandle<Object>();
}
// Mark the buffer as being used as an asm.js memory. This implies two
// things: 1) if the buffer is from a Wasm memory, that memory can no longer
// be grown, since that would detach this buffer, and 2) the buffer cannot
// be postMessage()'d, as that also detaches the buffer.
memory->set_is_asmjs_memory(true);
memory->set_is_detachable(false);
wasm_engine->memory_tracker()->MarkWasmMemoryNotGrowable(memory);
size_t size = memory->byte_length();
// Check the asm.js heap size against the valid limits.
if (!IsValidAsmjsMemorySize(size)) {

View File

@@ -30,36 +30,29 @@ namespace {
Object ConstructBuffer(Isolate* isolate, Handle<JSFunction> target,
Handle<JSReceiver> new_target, Handle<Object> length,
InitializedFlag initialized) {
bool initialize) {
Handle<JSObject> result;
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
isolate, result,
JSObject::New(target, new_target, Handle<AllocationSite>::null()));
auto array_buffer = Handle<JSArrayBuffer>::cast(result);
SharedFlag shared = (*target != target->native_context().array_buffer_fun())
? SharedFlag::kShared
: SharedFlag::kNotShared;
size_t byte_length;
if (!TryNumberToSize(*length, &byte_length) ||
byte_length > JSArrayBuffer::kMaxByteLength) {
// ToNumber failed.
array_buffer->SetupEmpty(shared);
JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer>::cast(result), isolate);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
auto backing_store =
BackingStore::Allocate(isolate, byte_length, shared, initialized);
if (backing_store) {
array_buffer->Attach(std::move(backing_store));
return *array_buffer;
SharedFlag shared_flag =
(*target == target->native_context().array_buffer_fun())
? SharedFlag::kNotShared
: SharedFlag::kShared;
if (!JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer>::cast(result),
isolate, byte_length, initialize,
shared_flag)) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
}
// Allocation of backing store failed.
array_buffer->SetupEmpty(shared);
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewRangeError(MessageTemplate::kArrayBufferAllocationFailed));
return *result;
}
} // namespace
@@ -87,8 +80,7 @@ BUILTIN(ArrayBufferConstructor) {
isolate, NewRangeError(MessageTemplate::kInvalidArrayBufferLength));
}
return ConstructBuffer(isolate, target, new_target, number_length,
InitializedFlag::kZeroInitialized);
return ConstructBuffer(isolate, target, new_target, number_length, true);
}
// This is a helper to construct an ArrayBuffer with uninitialized memory.
@@ -99,8 +91,7 @@ BUILTIN(ArrayBufferConstructor_DoNotInitialize) {
Handle<JSFunction> target(isolate->native_context()->array_buffer_fun(),
isolate);
Handle<Object> length = args.atOrUndefined(isolate, 1);
return ConstructBuffer(isolate, target, target, length,
InitializedFlag::kUninitialized);
return ConstructBuffer(isolate, target, target, length, false);
}
// ES6 section 24.1.4.1 get ArrayBuffer.prototype.byteLength

View File

@@ -70,8 +70,6 @@
#define CHECK(condition) assert(condition)
#endif
#define TRACE_BS(...) /* redefine for tracing backing store operations */
namespace v8 {
namespace {
@@ -3101,17 +3099,6 @@ class Serializer : public ValueSerializer::Delegate {
std::unique_ptr<SerializationData> Release() { return std::move(data_); }
void AppendExternalizedContentsTo(std::vector<ExternalizedContents>* to) {
for (auto& contents : externalized_contents_) {
auto bs_indirection = reinterpret_cast<std::shared_ptr<i::BackingStore>*>(
contents.DeleterData());
if (bs_indirection) {
auto backing_store = bs_indirection->get();
TRACE_BS("d8:append bs=%p mem=%p (%zu bytes)\n", backing_store,
backing_store->buffer_start(), backing_store->byte_length());
USE(backing_store);
}
}
to->insert(to->end(),
std::make_move_iterator(externalized_contents_.begin()),
std::make_move_iterator(externalized_contents_.end()));
@@ -3592,4 +3579,3 @@ int main(int argc, char* argv[]) { return v8::Shell::Main(argc, argv); }
#undef CHECK
#undef DCHECK
#undef TRACE_BS

View File

@@ -149,8 +149,6 @@ class ExternalizedContents {
}
~ExternalizedContents();
void* DeleterData() { return deleter_data_; }
private:
void* data_;
size_t length_;

View File

@@ -1378,6 +1378,7 @@ void JSArrayBuffer::JSArrayBufferPrint(std::ostream& os) { // NOLINT
if (is_detachable()) os << "\n - detachable";
if (was_detached()) os << "\n - detached";
if (is_shared()) os << "\n - shared";
if (is_wasm_memory()) os << "\n - is_wasm_memory";
JSObjectPrintBody(os, *this, !was_detached());
}

View File

@@ -52,7 +52,6 @@
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/numbers/hash-seed-inl.h"
#include "src/objects/backing-store.h"
#include "src/objects/elements.h"
#include "src/objects/frame-array-inl.h"
#include "src/objects/hash-table-inl.h"
@@ -2975,7 +2974,7 @@ void Isolate::Deinit() {
optimizing_compile_dispatcher_ = nullptr;
}
BackingStore::RemoveSharedWasmMemoryObjects(this);
wasm_engine()->memory_tracker()->DeleteSharedMemoryObjectsOnIsolate(this);
heap_.mark_compact_collector()->EnsureSweepingCompleted();
heap_.memory_allocator()->unmapper()->EnsureUnmappingCompleted();

View File

@@ -10,7 +10,6 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/logging/counters.h"
#include "src/objects/backing-store.h"
#include "src/roots/roots-inl.h"
#include "src/utils/memcopy.h"
#include "src/wasm/wasm-engine.h"
@@ -302,7 +301,8 @@ Object StackGuard::HandleInterrupts() {
if (TestAndClear(&interrupt_flags, GROW_SHARED_MEMORY)) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"),
"V8.WasmGrowSharedMemory");
BackingStore::UpdateSharedWasmMemoryObjects(isolate_);
isolate_->wasm_engine()->memory_tracker()->UpdateSharedMemoryInstances(
isolate_);
}
if (TestAndClear(&interrupt_flags, DEOPT_MARKED_ALLOCATION_SITES)) {

View File

@@ -21,8 +21,9 @@ void FreeBufferExtension::FreeBuffer(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::ArrayBuffer> arrayBuffer = args[0].As<v8::ArrayBuffer>();
v8::ArrayBuffer::Contents contents = arrayBuffer->Externalize();
contents.Deleter()(contents.Data(), contents.ByteLength(),
contents.DeleterData());
Isolate* isolate = reinterpret_cast<Isolate*>(args.GetIsolate());
isolate->array_buffer_allocator()->Free(contents.Data(),
contents.ByteLength());
}
} // namespace internal

View File

@@ -14,25 +14,32 @@
namespace v8 {
namespace internal {
namespace {
void FreeAllocationsHelper(
Heap* heap, const std::vector<JSArrayBuffer::Allocation>& allocations) {
for (JSArrayBuffer::Allocation alloc : allocations) {
JSArrayBuffer::FreeBackingStore(heap->isolate(), alloc);
}
}
} // namespace
void ArrayBufferCollector::QueueOrFreeGarbageAllocations(
std::vector<std::shared_ptr<BackingStore>> backing_stores) {
std::vector<JSArrayBuffer::Allocation> allocations) {
if (heap_->ShouldReduceMemory()) {
// Destruct the vector, which destructs the std::shared_ptrs, freeing
// the backing stores.
backing_stores.clear();
FreeAllocationsHelper(heap_, allocations);
} else {
base::MutexGuard guard(&allocations_mutex_);
allocations_.push_back(std::move(backing_stores));
allocations_.push_back(std::move(allocations));
}
}
void ArrayBufferCollector::PerformFreeAllocations() {
base::MutexGuard guard(&allocations_mutex_);
for (std::vector<std::shared_ptr<BackingStore>>& backing_stores :
for (const std::vector<JSArrayBuffer::Allocation>& allocations :
allocations_) {
// Destruct the vector, which destructs the std::shared_ptrs, freeing
// the backing stores.
backing_stores.clear();
FreeAllocationsHelper(heap_, allocations);
}
allocations_.clear();
}

View File

@@ -31,7 +31,7 @@ class ArrayBufferCollector {
//
// FreeAllocations() potentially triggers a background task for processing.
void QueueOrFreeGarbageAllocations(
std::vector<std::shared_ptr<BackingStore>> allocations);
std::vector<JSArrayBuffer::Allocation> allocations);
// Calls FreeAllocations() on a background thread.
void FreeAllocations();
@@ -45,7 +45,7 @@
Heap* const heap_;
base::Mutex allocations_mutex_;
std::vector<std::vector<std::shared_ptr<BackingStore>>> allocations_;
std::vector<std::vector<JSArrayBuffer::Allocation>> allocations_;
};
} // namespace internal

View File

@@ -12,19 +12,14 @@
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects.h"
#define TRACE_BS(...) /* redefine for tracing output */
namespace v8 {
namespace internal {
void ArrayBufferTracker::RegisterNew(
Heap* heap, JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store) {
if (!backing_store) return;
void ArrayBufferTracker::RegisterNew(Heap* heap, JSArrayBuffer buffer) {
if (buffer.backing_store() == nullptr) return;
// ArrayBuffer tracking works only for small objects.
DCHECK(!heap->IsLargeObject(buffer));
DCHECK_EQ(backing_store->buffer_start(), buffer.backing_store());
const size_t length = buffer.byte_length();
Page* page = Page::FromHeapObject(buffer);
@@ -36,7 +31,7 @@ void ArrayBufferTracker::RegisterNew(
tracker = page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
tracker->Add(buffer, std::move(backing_store));
tracker->Add(buffer, length);
}
// TODO(wez): Remove backing-store from external memory accounting.
@@ -46,50 +41,34 @@ void ArrayBufferTracker::RegisterNew(
->AdjustAmountOfExternalAllocatedMemory(length);
}
std::shared_ptr<BackingStore> ArrayBufferTracker::Unregister(
Heap* heap, JSArrayBuffer buffer) {
std::shared_ptr<BackingStore> backing_store;
void ArrayBufferTracker::Unregister(Heap* heap, JSArrayBuffer buffer) {
if (buffer.backing_store() == nullptr) return;
const size_t length = buffer.byte_length();
Page* page = Page::FromHeapObject(buffer);
const size_t length = buffer.byte_length();
{
base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
backing_store = tracker->Remove(buffer);
tracker->Remove(buffer, length);
}
// TODO(wez): Remove backing-store from external memory accounting.
heap->update_external_memory(-static_cast<intptr_t>(length));
return backing_store;
}
std::shared_ptr<BackingStore> ArrayBufferTracker::Lookup(Heap* heap,
JSArrayBuffer buffer) {
std::shared_ptr<BackingStore> backing_store;
if (buffer.backing_store() == nullptr) return backing_store;
Page* page = Page::FromHeapObject(buffer);
base::MutexGuard guard(page->mutex());
LocalArrayBufferTracker* tracker = page->local_tracker();
DCHECK_NOT_NULL(tracker);
return tracker->Lookup(buffer);
}
template <typename Callback>
void LocalArrayBufferTracker::Free(Callback should_free) {
size_t freed_memory = 0;
Isolate* isolate = page_->heap()->isolate();
for (TrackingData::iterator it = array_buffers_.begin();
it != array_buffers_.end();) {
// Unchecked cast because the map might already be dead at this point.
JSArrayBuffer buffer = JSArrayBuffer::unchecked_cast(it->first);
const size_t length = buffer.byte_length();
const size_t length = it->second.length;
if (should_free(buffer)) {
// Destroy the shared pointer, (perhaps) freeing the backing store.
TRACE_BS("ABT:free bs=%p mem=%p (%zu bytes)\n", it->second.get(),
it->second->buffer_start(), it->second->byte_length());
JSArrayBuffer::FreeBackingStore(isolate, it->second);
it = array_buffers_.erase(it);
freed_memory += length;
} else {
@@ -119,57 +98,35 @@ void ArrayBufferTracker::FreeDead(Page* page, MarkingState* marking_state) {
}
}
void LocalArrayBufferTracker::Add(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store) {
void LocalArrayBufferTracker::Add(JSArrayBuffer buffer, size_t length) {
page_->IncrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, buffer.byte_length());
ExternalBackingStoreType::kArrayBuffer, length);
AddInternal(buffer, std::move(backing_store));
AddInternal(buffer, length);
}
void LocalArrayBufferTracker::AddInternal(
JSArrayBuffer buffer, std::shared_ptr<BackingStore> backing_store) {
auto ret = array_buffers_.insert({buffer, std::move(backing_store)});
void LocalArrayBufferTracker::AddInternal(JSArrayBuffer buffer, size_t length) {
auto ret = array_buffers_.insert(
{buffer,
{buffer.backing_store(), length, buffer.backing_store(),
buffer.is_wasm_memory()}});
USE(ret);
// Check that we indeed inserted a new value and did not overwrite an existing
// one (which would be a bug).
DCHECK(ret.second);
}
std::shared_ptr<BackingStore> LocalArrayBufferTracker::Remove(
JSArrayBuffer buffer) {
TrackingData::iterator it = array_buffers_.find(buffer);
void LocalArrayBufferTracker::Remove(JSArrayBuffer buffer, size_t length) {
page_->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, length);
TrackingData::iterator it = array_buffers_.find(buffer);
// Check that we indeed find a key to remove.
DCHECK(it != array_buffers_.end());
// Steal the underlying shared pointer before erasing the entry.
std::shared_ptr<BackingStore> backing_store = std::move(it->second);
TRACE_BS("ABT:remove bs=%p mem=%p (%zu bytes)\n", backing_store.get(),
backing_store->buffer_start(), backing_store->byte_length());
// Erase the entry.
DCHECK_EQ(length, it->second.length);
array_buffers_.erase(it);
// Update accounting.
page_->DecrementExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer, buffer.byte_length());
return backing_store;
}
std::shared_ptr<BackingStore> LocalArrayBufferTracker::Lookup(
JSArrayBuffer buffer) {
TrackingData::iterator it = array_buffers_.find(buffer);
if (it != array_buffers_.end()) {
return it->second;
}
return std::shared_ptr<BackingStore>();
}
#undef TRACE_BS
} // namespace internal
} // namespace v8

View File

@@ -11,8 +11,6 @@
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#define TRACE_BS(...) /* redefine for tracing output */
namespace v8 {
namespace internal {
@@ -22,7 +20,7 @@ LocalArrayBufferTracker::~LocalArrayBufferTracker() {
template <typename Callback>
void LocalArrayBufferTracker::Process(Callback callback) {
std::vector<std::shared_ptr<BackingStore>> backing_stores_to_free;
std::vector<JSArrayBuffer::Allocation> backing_stores_to_free;
TrackingData kept_array_buffers;
JSArrayBuffer new_buffer;
@@ -34,9 +32,8 @@ void LocalArrayBufferTracker::Process(Callback callback) {
DCHECK_EQ(page_, Page::FromHeapObject(old_buffer));
const CallbackResult result = callback(old_buffer, &new_buffer);
if (result == kKeepEntry) {
kept_array_buffers.insert(std::move(*it));
kept_array_buffers.insert(*it);
} else if (result == kUpdateEntry) {
DCHECK_EQ(old_buffer.byte_length(), new_buffer.byte_length());
DCHECK(!new_buffer.is_null());
Page* target_page = Page::FromHeapObject(new_buffer);
{
@@ -47,20 +44,22 @@
tracker = target_page->local_tracker();
}
DCHECK_NOT_NULL(tracker);
const size_t length = old_buffer.byte_length();
const size_t length = it->second.length;
// We should decrement before adding to avoid potential overflows in
// the external memory counters.
tracker->AddInternal(new_buffer, std::move(it->second));
DCHECK_EQ(it->first.is_wasm_memory(), it->second.is_wasm_memory);
tracker->AddInternal(new_buffer, length);
MemoryChunk::MoveExternalBackingStoreBytes(
ExternalBackingStoreType::kArrayBuffer,
static_cast<MemoryChunk*>(page_),
static_cast<MemoryChunk*>(target_page), length);
}
} else if (result == kRemoveEntry) {
freed_memory += old_buffer.byte_length();
TRACE_BS("ABT:queue bs=%p mem=%p (%zu bytes)\n", it->second.get(),
it->second->buffer_start(), it->second->byte_length());
backing_stores_to_free.push_back(std::move(it->second));
freed_memory += it->second.length;
// We pass backing_store() and stored length to the collector for freeing
// the backing store. Wasm allocations will go through their own tracker
// based on the backing store.
backing_stores_to_free.push_back(it->second);
} else {
UNREACHABLE();
}
@@ -149,4 +148,3 @@ void ArrayBufferTracker::TearDown(Heap* heap) {
} // namespace internal
} // namespace v8
#undef TRACE_BS

View File

@@ -9,7 +9,6 @@
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-array-buffer.h"
#include "src/utils/allocation.h"
@@ -32,12 +31,8 @@ class ArrayBufferTracker : public AllStatic {
// Register/unregister a new JSArrayBuffer |buffer| for tracking. Guards all
// access to the tracker by taking the page lock for the corresponding page.
inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer,
std::shared_ptr<BackingStore>);
inline static std::shared_ptr<BackingStore> Unregister(Heap* heap,
JSArrayBuffer buffer);
inline static std::shared_ptr<BackingStore> Lookup(Heap* heap,
JSArrayBuffer buffer);
inline static void RegisterNew(Heap* heap, JSArrayBuffer buffer);
inline static void Unregister(Heap* heap, JSArrayBuffer buffer);
// Identifies all backing store pointers for dead JSArrayBuffers in new space.
// Does not take any locks and can only be called during Scavenge.
@@ -75,10 +70,8 @@ class LocalArrayBufferTracker {
explicit LocalArrayBufferTracker(Page* page) : page_(page) {}
~LocalArrayBufferTracker();
inline void Add(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store);
inline std::shared_ptr<BackingStore> Remove(JSArrayBuffer buffer);
inline std::shared_ptr<BackingStore> Lookup(JSArrayBuffer buffer);
inline void Add(JSArrayBuffer buffer, size_t length);
inline void Remove(JSArrayBuffer buffer, size_t length);
// Frees up array buffers.
//
@@ -112,13 +105,17 @@ class LocalArrayBufferTracker {
}
};
// Keep track of the backing store and the corresponding length at time of
// registering. The length is accessed from JavaScript and can be a
// HeapNumber. The reason for tracking the length is that in the case of
// length being a HeapNumber, the buffer and its length may be stored on
// different memory pages, making it impossible to guarantee order of freeing.
using TrackingData =
std::unordered_map<JSArrayBuffer, std::shared_ptr<BackingStore>, Hasher>;
std::unordered_map<JSArrayBuffer, JSArrayBuffer::Allocation, Hasher>;
// Internal version of add that does not update counters. Requires separate
// logic for updating external memory counters.
inline void AddInternal(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store);
inline void AddInternal(JSArrayBuffer buffer, size_t length);
Page* page_;
// The set contains raw heap pointers which are removed by the GC upon

View File

@@ -2067,13 +2067,6 @@ void initialize_length<PropertyArray>(Handle<PropertyArray> array, int length) {
array->initialize_length(length);
}
inline void ZeroEmbedderFields(i::Handle<i::JSObject> obj) {
auto count = obj->GetEmbedderFieldCount();
for (int i = 0; i < count; i++) {
obj->SetEmbedderField(i, Smi::kZero);
}
}
} // namespace
template <typename T>
@@ -3100,46 +3093,15 @@ Handle<SyntheticModule> Factory::NewSyntheticModule(
return module;
}
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(AllocationType allocation) {
Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
isolate());
auto result =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
ZeroEmbedderFields(result);
result->SetupEmpty(SharedFlag::kNotShared);
return result;
}
MaybeHandle<JSArrayBuffer> Factory::NewJSArrayBufferAndBackingStore(
size_t byte_length, InitializedFlag initialized,
AllocationType allocation) {
// TODO(titzer): Don't bother allocating a 0-length backing store.
// This is currently required because the embedder API for
// TypedArray::HasBuffer() checks if the backing store is nullptr.
// That check should be changed.
std::unique_ptr<BackingStore> backing_store = BackingStore::Allocate(
isolate(), byte_length, SharedFlag::kNotShared, initialized);
if (!backing_store) return MaybeHandle<JSArrayBuffer>();
Handle<Map> map(isolate()->native_context()->array_buffer_fun().initial_map(),
isolate());
auto array_buffer =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
array_buffer->Attach(std::move(backing_store));
ZeroEmbedderFields(array_buffer);
return array_buffer;
}
Handle<JSArrayBuffer> Factory::NewJSSharedArrayBuffer(
AllocationType allocation) {
Handle<Map> map(
isolate()->native_context()->shared_array_buffer_fun().initial_map(),
Handle<JSArrayBuffer> Factory::NewJSArrayBuffer(SharedFlag shared,
AllocationType allocation) {
Handle<JSFunction> array_buffer_fun(
shared == SharedFlag::kShared
? isolate()->native_context()->shared_array_buffer_fun()
: isolate()->native_context()->array_buffer_fun(),
isolate());
auto result =
Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
ZeroEmbedderFields(result);
result->SetupEmpty(SharedFlag::kShared);
return result;
Handle<Map> map(array_buffer_fun->initial_map(), isolate());
return Handle<JSArrayBuffer>::cast(NewJSObjectFromMap(map, allocation));
}
Handle<JSIteratorResult> Factory::NewJSIteratorResult(Handle<Object> value,
@@ -3228,7 +3190,9 @@ Handle<JSArrayBufferView> Factory::NewJSArrayBufferView(
array_buffer_view->set_buffer(*buffer);
array_buffer_view->set_byte_offset(byte_offset);
array_buffer_view->set_byte_length(byte_length);
ZeroEmbedderFields(array_buffer_view);
for (int i = 0; i < v8::ArrayBufferView::kEmbedderFieldCount; i++) {
array_buffer_view->SetEmbedderField(i, Smi::kZero);
}
DCHECK_EQ(array_buffer_view->GetEmbedderFieldCount(),
v8::ArrayBufferView::kEmbedderFieldCount);
return array_buffer_view;
@@ -4184,7 +4148,9 @@ Handle<JSPromise> Factory::NewJSPromiseWithoutHook(AllocationType allocation) {
NewJSObject(isolate()->promise_function(), allocation));
promise->set_reactions_or_result(Smi::kZero);
promise->set_flags(0);
ZeroEmbedderFields(promise);
for (int i = 0; i < v8::Promise::kEmbedderFieldCount; i++) {
promise->SetEmbedderField(i, Smi::kZero);
}
return promise;
}

View File

@@ -75,8 +75,7 @@ class WeakCell;
struct SourceRange;
template <typename T>
class ZoneVector;
enum class SharedFlag : uint8_t;
enum class InitializedFlag : uint8_t;
enum class SharedFlag : uint32_t;
enum FunctionMode {
kWithNameBit = 1 << 0,
@@ -697,14 +696,7 @@ class V8_EXPORT_PRIVATE Factory {
v8::Module::SyntheticModuleEvaluationSteps evaluation_steps);
Handle<JSArrayBuffer> NewJSArrayBuffer(
AllocationType allocation = AllocationType::kYoung);
MaybeHandle<JSArrayBuffer> NewJSArrayBufferAndBackingStore(
size_t byte_length, InitializedFlag initialized,
AllocationType allocation = AllocationType::kYoung);
Handle<JSArrayBuffer> NewJSSharedArrayBuffer(
AllocationType allocation = AllocationType::kYoung);
SharedFlag shared, AllocationType allocation = AllocationType::kYoung);
static void TypeAndSizeForElementsKind(ElementsKind kind,
ExternalArrayType* array_type,

View File

@@ -2789,18 +2789,12 @@ HeapObject Heap::AlignWithFiller(HeapObject object, int object_size,
return object;
}
void Heap::RegisterBackingStore(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store) {
ArrayBufferTracker::RegisterNew(this, buffer, std::move(backing_store));
void Heap::RegisterNewArrayBuffer(JSArrayBuffer buffer) {
ArrayBufferTracker::RegisterNew(this, buffer);
}
std::shared_ptr<BackingStore> Heap::UnregisterBackingStore(
JSArrayBuffer buffer) {
return ArrayBufferTracker::Unregister(this, buffer);
}
std::shared_ptr<BackingStore> Heap::LookupBackingStore(JSArrayBuffer buffer) {
return ArrayBufferTracker::Lookup(this, buffer);
void Heap::UnregisterArrayBuffer(JSArrayBuffer buffer) {
ArrayBufferTracker::Unregister(this, buffer);
}
void Heap::ConfigureInitialOldGenerationSize() {

View File

@@ -45,7 +45,6 @@ class TestMemoryAllocatorScope;
} // namespace heap
class IncrementalMarking;
class BackingStore;
class JSArrayBuffer;
using v8::MemoryPressureLevel;
@@ -1214,10 +1213,13 @@ class Heap {
// ===========================================================================
// ArrayBuffer tracking. =====================================================
// ===========================================================================
void RegisterBackingStore(JSArrayBuffer buffer,
std::shared_ptr<BackingStore> backing_store);
std::shared_ptr<BackingStore> UnregisterBackingStore(JSArrayBuffer buffer);
std::shared_ptr<BackingStore> LookupBackingStore(JSArrayBuffer buffer);
// TODO(gc): API usability: encapsulate mutation of JSArrayBuffer::is_external
// in the registration/unregistration APIs. Consider dropping the "New" from
// "RegisterNewArrayBuffer" because one can re-register a previously
// unregistered buffer, too, and the name is confusing.
void RegisterNewArrayBuffer(JSArrayBuffer buffer);
void UnregisterArrayBuffer(JSArrayBuffer buffer);
// ===========================================================================
// Allocation site tracking. =================================================

View File

@@ -1,663 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/objects/backing-store.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/counters.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-objects-inl.h"
#define TRACE_BS(...) /* redefine for tracing output */
namespace v8 {
namespace internal {
namespace {
#if V8_TARGET_ARCH_64_BIT
constexpr bool kUseGuardRegions = true;
#else
constexpr bool kUseGuardRegions = false;
#endif
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the
// address space limit needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x4000000000L; // 256 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
constexpr uint64_t GB = 1024 * 1024 * 1024;
constexpr uint64_t kNegativeGuardSize = 2 * GB;
constexpr uint64_t kFullGuardSize = 10 * GB;
std::atomic<uint64_t> reserved_address_space_{0};
// Allocation results are reported to UMA
//
// See wasm_memory_allocation_result in counters.h
enum class AllocationStatus {
kSuccess, // Succeeded on the first try
kSuccessAfterRetry, // Succeeded after garbage collection
kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
// space limit
kOtherFailure // Failed for an unknown reason
};
base::AddressRegion GetGuardedRegion(void* buffer_start, size_t byte_length) {
// Guard regions always look like this:
// |xxx(2GiB)xxx|.......(4GiB)..xxxxx|xxxxxx(4GiB)xxxxxx|
//              ^ buffer_start
//                              ^ byte_length
// ^ negative guard region           ^ positive guard region
Address start = reinterpret_cast<Address>(buffer_start);
DCHECK_EQ(8, sizeof(size_t)); // only use on 64-bit
DCHECK_EQ(0, start % AllocatePageSize());
return base::AddressRegion(start - (2 * GB),
static_cast<size_t>(kFullGuardSize));
}
void RecordStatus(Isolate* isolate, AllocationStatus status) {
isolate->counters()->wasm_memory_allocation_result()->AddSample(
static_cast<int>(status));
}
inline void DebugCheckZero(void* start, size_t byte_length) {
#if DEBUG
// Double check memory is zero-initialized.
const byte* bytes = reinterpret_cast<const byte*>(start);
for (size_t i = 0; i < byte_length; i++) {
DCHECK_EQ(0, bytes[i]);
}
#endif
}
} // namespace
bool BackingStore::ReserveAddressSpace(uint64_t num_bytes) {
uint64_t reservation_limit = kAddressSpaceLimit;
while (true) {
uint64_t old_count = reserved_address_space_.load();
if (old_count > reservation_limit) return false;
if (reservation_limit - old_count < num_bytes) return false;
if (reserved_address_space_.compare_exchange_weak(old_count,
old_count + num_bytes)) {
return true;
}
}
}
void BackingStore::ReleaseReservation(uint64_t num_bytes) {
uint64_t old_reserved = reserved_address_space_.fetch_sub(num_bytes);
USE(old_reserved);
DCHECK_LE(num_bytes, old_reserved);
}
// The backing store for a Wasm shared memory has a doubly linked list
// of weak global handles to the attached memory objects. The list
// is used to broadcast updates when a shared memory is grown.
struct SharedWasmMemoryData {
SharedWasmMemoryData* next_;
SharedWasmMemoryData* prev_;
Isolate* isolate_;
// A global (weak) handle to the memory object. Note that this handle
// is destroyed by the finalizer of the memory object, so it need not
// be manually destroyed here.
Handle<WasmMemoryObject> memory_object_;
SharedWasmMemoryData(Isolate* isolate, Handle<WasmMemoryObject> memory_object)
: next_(nullptr),
prev_(nullptr),
isolate_(isolate),
memory_object_(memory_object) {}
SharedWasmMemoryData* unlink() {
auto next = next_;
if (prev_) prev_->next_ = next_;
if (next_) next_->prev_ = prev_;
return next;
}
};
void BackingStore::Clear() {
buffer_start_ = nullptr;
byte_length_ = 0;
has_guard_regions_ = false;
type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}
BackingStore::~BackingStore() {
if (globally_registered_) {
GlobalBackingStoreRegistry::Unregister(this);
globally_registered_ = false;
}
if (buffer_start_ == nullptr) return; // nothing to deallocate
if (is_wasm_memory_) {
if (is_shared_) {
// Deallocate the list of attached memory objects.
SharedWasmMemoryData* list = get_shared_wasm_memory_data();
while (list) {
auto old = list;
list = list->next_;
delete old;
}
type_specific_data_.shared_wasm_memory_data = nullptr;
}
// Wasm memories are always allocated through the page allocator.
auto region =
has_guard_regions_
? GetGuardedRegion(buffer_start_, byte_length_)
: base::AddressRegion(reinterpret_cast<Address>(buffer_start_),
byte_capacity_);
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(region.begin()), region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(has_guard_regions_ ? kFullGuardSize
: byte_capacity_);
Clear();
return;
}
if (free_on_destruct_) {
// JSArrayBuffer backing store. Deallocate through the embedder's allocator.
auto allocator = reinterpret_cast<v8::ArrayBuffer::Allocator*>(
get_v8_api_array_buffer_allocator());
TRACE_BS("BS:free bs=%p mem=%p (%zu bytes)\n", this, buffer_start_,
byte_capacity_);
allocator->Free(buffer_start_, byte_length_);
}
Clear();
}
// Allocate a backing store using the array buffer allocator from the embedder.
std::unique_ptr<BackingStore> BackingStore::Allocate(
Isolate* isolate, size_t byte_length, SharedFlag shared,
InitializedFlag initialized) {
void* buffer_start = nullptr;
auto allocator = isolate->array_buffer_allocator();
CHECK_NOT_NULL(allocator);
if (byte_length != 0) {
auto counters = isolate->counters();
int mb_length = static_cast<int>(byte_length / MB);
if (mb_length > 0) {
counters->array_buffer_big_allocations()->AddSample(mb_length);
}
if (shared == SharedFlag::kShared) {
counters->shared_array_allocations()->AddSample(mb_length);
}
if (initialized == InitializedFlag::kZeroInitialized) {
buffer_start = allocator->Allocate(byte_length);
if (buffer_start) {
// TODO(titzer): node does not implement the zero-initialization API!
constexpr bool
kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI = true;
if ((!(kDebugCheckZeroDisabledDueToNodeNotImplementingZeroInitAPI)) &&
!FLAG_mock_arraybuffer_allocator) {
DebugCheckZero(buffer_start, byte_length);
}
}
} else {
buffer_start = allocator->AllocateUninitialized(byte_length);
}
if (buffer_start == nullptr) {
// Allocation failed.
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
}
auto result = new BackingStore(buffer_start, // start
byte_length, // length
byte_length, // capacity
shared, // shared
false, // is_wasm_memory
true, // free_on_destruct
false); // has_guard_regions
TRACE_BS("BS:alloc bs=%p mem=%p (%zu bytes)\n", result,
result->buffer_start(), byte_length);
result->type_specific_data_.v8_api_array_buffer_allocator = allocator;
return std::unique_ptr<BackingStore>(result);
}
// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::TryAllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared) {
bool guards = kUseGuardRegions;
// For accounting purposes, whether a GC was necessary.
bool did_retry = false;
// A helper to try running a function up to 3 times, executing a GC
// if the first and second attempts failed.
auto gc_retry = [&](const std::function<bool()>& fn) {
for (int i = 0; i < 3; i++) {
if (fn()) return true;
// Collect garbage and retry.
did_retry = true;
// TODO(wasm): try Heap::EagerlyFreeExternalMemory() first?
isolate->heap()->MemoryPressureNotification(
MemoryPressureLevel::kCritical, true);
}
return false;
};
// Compute size of reserved memory.
size_t reservation_size = 0;
size_t byte_capacity = 0;
if (guards) {
reservation_size = static_cast<size_t>(kFullGuardSize);
byte_capacity =
static_cast<size_t>(wasm::kV8MaxWasmMemoryPages * wasm::kWasmPageSize);
} else {
reservation_size = std::min(maximum_pages, wasm::kV8MaxWasmMemoryPages) *
wasm::kWasmPageSize;
byte_capacity = reservation_size;
}
//--------------------------------------------------------------------------
// 1. Enforce maximum address space reservation per engine.
//--------------------------------------------------------------------------
auto reserve_memory_space = [&] {
return BackingStore::ReserveAddressSpace(reservation_size);
};
if (!gc_retry(reserve_memory_space)) {
// Crash on out-of-memory if the correctness fuzzer is running.
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("could not allocate wasm memory backing store");
}
RecordStatus(isolate, AllocationStatus::kAddressSpaceLimitReachedFailure);
return {};
}
//--------------------------------------------------------------------------
// 2. Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
auto allocate_pages = [&] {
allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
wasm::kWasmPageSize, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
// Page allocator could not reserve enough pages.
BackingStore::ReleaseReservation(reservation_size);
RecordStatus(isolate, AllocationStatus::kOtherFailure);
return {};
}
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
byte* buffer_start = reinterpret_cast<byte*>(allocation_base) +
(guards ? kNegativeGuardSize : 0);
//--------------------------------------------------------------------------
// 3. Commit the initial pages (allow read/write).
//--------------------------------------------------------------------------
size_t byte_length = initial_pages * wasm::kWasmPageSize;
auto commit_memory = [&] {
return byte_length == 0 ||
SetPermissions(GetPlatformPageAllocator(), buffer_start, byte_length,
PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
// SetPermissions put us over the process memory limit.
V8::FatalProcessOutOfMemory(nullptr, "BackingStore::AllocateWasmMemory()");
}
DebugCheckZero(buffer_start, byte_length); // touch the bytes.
RecordStatus(isolate, did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
auto result = new BackingStore(buffer_start, // start
byte_length, // length
byte_capacity, // capacity
shared, // shared
true, // is_wasm_memory
true, // free_on_destruct
guards); // has_guard_regions
// Shared Wasm memories need an anchor for the memory object list.
if (shared == SharedFlag::kShared) {
result->type_specific_data_.shared_wasm_memory_data =
new SharedWasmMemoryData(nullptr, Handle<WasmMemoryObject>());
}
return std::unique_ptr<BackingStore>(result);
}
// Allocate a backing store for a Wasm memory. Always use the page allocator
// and add guard regions.
std::unique_ptr<BackingStore> BackingStore::AllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared) {
// Wasm pages must be a multiple of the allocation page size.
DCHECK_EQ(0, wasm::kWasmPageSize % AllocatePageSize());
// Enforce engine limitation on the maximum number of pages.
if (initial_pages > wasm::kV8MaxWasmMemoryPages) return nullptr;
auto backing_store =
TryAllocateWasmMemory(isolate, initial_pages, maximum_pages, shared);
if (!backing_store && maximum_pages > initial_pages) {
// If allocating the maximum failed, try allocating with maximum set to
// initial
backing_store =
TryAllocateWasmMemory(isolate, initial_pages, initial_pages, shared);
}
return backing_store;
}
std::unique_ptr<BackingStore> BackingStore::CopyWasmMemory(
Isolate* isolate, std::shared_ptr<BackingStore> old,
size_t new_byte_length) {
DCHECK_GE(new_byte_length, old->byte_length());
// Note that we could allocate uninitialized to save initialization cost here,
// but since Wasm memories are allocated by the page allocator, the zeroing
// cost is already built-in.
// TODO(titzer): should we use a suitable maximum here?
auto new_backing_store = BackingStore::AllocateWasmMemory(
isolate, new_byte_length / wasm::kWasmPageSize,
new_byte_length / wasm::kWasmPageSize,
old->is_shared() ? SharedFlag::kShared : SharedFlag::kNotShared);
if (!new_backing_store ||
new_backing_store->has_guard_regions() != old->has_guard_regions()) {
return {};
}
size_t old_size = old->byte_length();
memcpy(new_backing_store->buffer_start(), old->buffer_start(), old_size);
return new_backing_store;
}
// Try to grow the size of a wasm memory in place, without realloc + copy.
bool BackingStore::GrowWasmMemoryInPlace(Isolate* isolate,
size_t new_byte_length) {
DCHECK(is_wasm_memory_);
DCHECK_EQ(0, new_byte_length % wasm::kWasmPageSize);
if (new_byte_length <= byte_length_) {
return true; // already big enough.
}
if (byte_capacity_ < new_byte_length) {
return false; // not enough capacity.
}
// Try to adjust the guard regions.
DCHECK_NOT_NULL(buffer_start_);
// If adjusting permissions fails, propagate error back to return
// failure to grow.
if (!i::SetPermissions(GetPlatformPageAllocator(), buffer_start_,
new_byte_length, PageAllocator::kReadWrite)) {
return false;
}
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_byte_length - byte_length_);
byte_length_ = new_byte_length;
return true;
}
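// Sketch of the grow protocol as a caller might drive it (the actual caller
// is the Wasm memory growth path; |bs| is an assumed
// std::shared_ptr<BackingStore>):
//
//   if (!bs->GrowWasmMemoryInPlace(isolate, new_byte_length)) {
//     // Not enough reserved capacity; fall back to allocate-and-copy.
//     std::unique_ptr<BackingStore> fresh =
//         BackingStore::CopyWasmMemory(isolate, bs, new_byte_length);
//     if (!fresh) { /* growth failed */ }
//   }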
void BackingStore::AttachSharedWasmMemoryObject(
Isolate* isolate, Handle<WasmMemoryObject> memory_object) {
DCHECK(is_wasm_memory_);
DCHECK(is_shared_);
// We need to take the global registry lock for this operation.
GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(isolate, this,
memory_object);
}
void BackingStore::BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_size) {
// requires the global registry lock.
GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
isolate, backing_store, new_size);
}
void BackingStore::RemoveSharedWasmMemoryObjects(Isolate* isolate) {
// requires the global registry lock.
GlobalBackingStoreRegistry::Purge(isolate);
}
void BackingStore::UpdateSharedWasmMemoryObjects(Isolate* isolate) {
// requires the global registry lock.
GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(isolate);
}
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
auto result =
new BackingStore(allocation_base, allocation_length, allocation_length,
shared, false, free_on_destruct, false);
result->type_specific_data_.v8_api_array_buffer_allocator =
isolate->array_buffer_allocator();
TRACE_BS("BS:wrap bs=%p mem=%p (%zu bytes)\n", result, result->buffer_start(),
result->byte_length());
return std::unique_ptr<BackingStore>(result);
}
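// Example (sketch): wrapping embedder-owned memory that V8 must not free.
// |embedder_mem| and |length| are hypothetical; the embedder remains
// responsible for freeing the memory because {free_on_destruct} is false.
//
//   std::unique_ptr<BackingStore> bs = BackingStore::WrapAllocation(
//       isolate, embedder_mem, length, SharedFlag::kNotShared,
//       /*free_on_destruct=*/false);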
void* BackingStore::get_v8_api_array_buffer_allocator() {
CHECK(!is_wasm_memory_);
auto array_buffer_allocator =
type_specific_data_.v8_api_array_buffer_allocator;
CHECK_NOT_NULL(array_buffer_allocator);
return array_buffer_allocator;
}
SharedWasmMemoryData* BackingStore::get_shared_wasm_memory_data() {
CHECK(is_wasm_memory_ && is_shared_);
auto shared_wasm_memory_data = type_specific_data_.shared_wasm_memory_data;
CHECK(shared_wasm_memory_data);
return shared_wasm_memory_data;
}
namespace {
// Implementation details of GlobalBackingStoreRegistry.
struct GlobalBackingStoreRegistryImpl {
  GlobalBackingStoreRegistryImpl() = default;
base::Mutex mutex_;
std::unordered_map<const void*, std::weak_ptr<BackingStore>> map_;
};
base::LazyInstance<GlobalBackingStoreRegistryImpl>::type global_registry_impl_ =
LAZY_INSTANCE_INITIALIZER;
inline GlobalBackingStoreRegistryImpl* impl() {
return global_registry_impl_.Pointer();
}
void NopFinalizer(const v8::WeakCallbackInfo<void>& data) {
Address* global_handle_location =
reinterpret_cast<Address*>(data.GetParameter());
GlobalHandles::Destroy(global_handle_location);
}
} // namespace
void GlobalBackingStoreRegistry::Register(
std::shared_ptr<BackingStore> backing_store) {
if (!backing_store) return;
base::MutexGuard scope_lock(&impl()->mutex_);
if (backing_store->globally_registered_) return;
TRACE_BS("BS:reg bs=%p mem=%p (%zu bytes)\n", backing_store.get(),
backing_store->buffer_start(), backing_store->byte_length());
std::weak_ptr<BackingStore> weak = backing_store;
auto result = impl()->map_.insert({backing_store->buffer_start(), weak});
CHECK(result.second);
backing_store->globally_registered_ = true;
}
void GlobalBackingStoreRegistry::Unregister(BackingStore* backing_store) {
if (!backing_store->globally_registered_) return;
base::MutexGuard scope_lock(&impl()->mutex_);
const auto& result = impl()->map_.find(backing_store->buffer_start());
if (result != impl()->map_.end()) {
auto shared = result->second.lock();
if (shared) {
DCHECK_EQ(backing_store, shared.get());
}
impl()->map_.erase(result);
}
backing_store->globally_registered_ = false;
}
std::shared_ptr<BackingStore> GlobalBackingStoreRegistry::Lookup(
void* buffer_start, size_t length) {
base::MutexGuard scope_lock(&impl()->mutex_);
  TRACE_BS("BS:lookup mem=%p (%zu bytes)\n", buffer_start, length);
const auto& result = impl()->map_.find(buffer_start);
if (result == impl()->map_.end()) {
return std::shared_ptr<BackingStore>();
}
auto backing_store = result->second.lock();
DCHECK_EQ(buffer_start, backing_store->buffer_start());
DCHECK_EQ(length, backing_store->byte_length());
return backing_store;
}
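// Round-trip sketch for the registry (hypothetical embedder-compat path;
// |bs| is an assumed std::shared_ptr<BackingStore>):
//
//   GlobalBackingStoreRegistry::Register(bs);
//   std::shared_ptr<BackingStore> found = GlobalBackingStoreRegistry::Lookup(
//       bs->buffer_start(), bs->byte_length());
//   DCHECK_EQ(bs.get(), found.get());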
void GlobalBackingStoreRegistry::Purge(Isolate* isolate) {
base::MutexGuard scope_lock(&impl()->mutex_);
// Purge all entries in the map that refer to the given isolate.
for (auto& entry : impl()->map_) {
auto backing_store = entry.second.lock();
if (!backing_store) continue; // skip entries where weak ptr is null
if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory
SharedWasmMemoryData* list = backing_store->get_shared_wasm_memory_data();
while (list) {
if (list->isolate_ == isolate) {
// Unlink and delete the entry.
auto old = list;
list = list->unlink();
delete old;
continue;
}
list = list->next_;
}
}
}
void GlobalBackingStoreRegistry::AddSharedWasmMemoryObject(
Isolate* isolate, BackingStore* backing_store,
Handle<WasmMemoryObject> memory_object) {
// Create a weak global handle to the memory object.
Handle<WasmMemoryObject> weak_memory =
isolate->global_handles()->Create<WasmMemoryObject>(*memory_object);
Address* global_handle_location = weak_memory.location();
GlobalHandles::MakeWeak(global_handle_location, global_handle_location,
&NopFinalizer, v8::WeakCallbackType::kParameter);
SharedWasmMemoryData* entry = new SharedWasmMemoryData(isolate, weak_memory);
base::MutexGuard scope_lock(&impl()->mutex_);
SharedWasmMemoryData* list = backing_store->get_shared_wasm_memory_data();
SharedWasmMemoryData* next = list->next_;
if (next) {
next->prev_ = entry;
entry->next_ = next;
}
list->next_ = entry;
entry->prev_ = list;
}
void GlobalBackingStoreRegistry::BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_size) {
HandleScope scope(isolate);
std::vector<Handle<WasmMemoryObject>> memory_objects;
{
    // We must gather the memory objects to update while holding the lock,
    // but we cannot allocate while we hold the lock, because a GC may cause
    // another backing store to be deleted and unregistered, which also
    // tries to take the lock.
base::MutexGuard scope_lock(&impl()->mutex_);
SharedWasmMemoryData* list =
backing_store->get_shared_wasm_memory_data()->next_;
while (list) {
if (list->isolate_ == isolate && !list->memory_object_.is_null()) {
memory_objects.push_back(list->memory_object_);
} else {
list->isolate_->stack_guard()->RequestGrowSharedMemory();
}
list = list->next_;
}
}
// Update memory objects without the lock held (GC may free backing stores).
  // Note that we only gathered memory objects from the isolate we are
  // currently running in. Therefore no new (relevant) memory objects can be
  // constructed concurrently, and none of the gathered memory objects can
  // die.
for (auto memory_object : memory_objects) {
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSSharedArrayBuffer();
new_buffer->Attach(backing_store);
memory_object->update_instances(isolate, new_buffer);
}
}
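// The two-phase shape used above and in UpdateSharedWasmMemoryObjects below,
// distilled (sketch):
//
//   std::vector<Handle<WasmMemoryObject>> snapshot;
//   {
//     base::MutexGuard scope_lock(&impl()->mutex_);
//     // Copy relevant handles into |snapshot|; no allocation here.
//   }
//   // With the lock released, allocate new buffers and update |snapshot|.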
void GlobalBackingStoreRegistry::UpdateSharedWasmMemoryObjects(
Isolate* isolate) {
HandleScope scope(isolate);
std::vector<std::shared_ptr<BackingStore>> backing_stores;
std::vector<Handle<WasmMemoryObject>> memory_objects;
{
    // We must gather the memory objects to update while holding the lock,
    // but we cannot allocate while we hold the lock, because a GC may cause
    // another backing store to be deleted and unregistered, which also
    // tries to take the lock.
base::MutexGuard scope_lock(&impl()->mutex_);
for (auto& entry : impl()->map_) {
auto backing_store = entry.second.lock();
if (!backing_store) continue; // skip entries where weak ptr is null
if (!backing_store->is_wasm_memory()) continue; // skip non-wasm memory
SharedWasmMemoryData* list =
backing_store->get_shared_wasm_memory_data()->next_;
while (list) {
Handle<WasmMemoryObject> memory_object = list->memory_object_;
if (list->isolate_ == isolate && !memory_object.is_null()) {
backing_stores.push_back(backing_store);
memory_objects.push_back(memory_object);
}
list = list->next_;
}
}
}
// Update memory objects without the lock held (GC may free backing stores).
  // Note that we only gathered memory objects from the isolate we are
  // currently running in. Therefore no new (relevant) memory objects can be
  // constructed concurrently, and none of the gathered memory objects can
  // die.
for (size_t i = 0; i < backing_stores.size(); i++) {
auto backing_store = backing_stores[i];
auto memory_object = memory_objects[i];
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
if (old_buffer->byte_length() != backing_store->byte_length()) {
Handle<JSArrayBuffer> new_buffer =
isolate->factory()->NewJSSharedArrayBuffer();
new_buffer->Attach(backing_store);
memory_object->update_instances(isolate, new_buffer);
}
}
}
} // namespace internal
} // namespace v8
#undef TRACE_BS


@ -1,203 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_OBJECTS_BACKING_STORE_H_
#define V8_OBJECTS_BACKING_STORE_H_
#include "src/handles/handles.h"
namespace v8 {
namespace internal {
class Isolate;
class WasmMemoryObject;
// Whether the backing store is shared or not.
enum class SharedFlag : uint8_t { kNotShared, kShared };
// Whether the backing store memory is initialized to zero or not.
enum class InitializedFlag : uint8_t { kUninitialized, kZeroInitialized };
// Internal information for shared wasm memories. E.g. contains
// a list of all memory objects (across all isolates) that share this
// backing store.
struct SharedWasmMemoryData;
// The {BackingStore} data structure stores all the low-level details about the
// backing store of an array buffer or Wasm memory, including its base address
// and length, whether it is shared, provided by the embedder, has guard
// regions, etc. Instances of this class *own* the underlying memory
// when they are created through one of the {Allocate()} methods below,
// and the destructor frees the memory (and page allocation if necessary).
// Backing stores can also *wrap* embedder-allocated memory. In this case,
// they do not own the memory, and upon destruction, they do not deallocate it.
class V8_EXPORT_PRIVATE BackingStore {
public:
~BackingStore();
// Allocate an array buffer backing store using the default method,
// which currently is the embedder-provided array buffer allocator.
static std::unique_ptr<BackingStore> Allocate(Isolate* isolate,
size_t byte_length,
SharedFlag shared,
InitializedFlag initialized);
// Allocate the backing store for a Wasm memory.
static std::unique_ptr<BackingStore> AllocateWasmMemory(Isolate* isolate,
size_t initial_pages,
size_t maximum_pages,
SharedFlag shared);
// Allocate a new, larger, backing store for a Wasm memory and copy the
// contents of this backing store into it.
static std::unique_ptr<BackingStore> CopyWasmMemory(
Isolate* isolate, std::shared_ptr<BackingStore> old,
size_t new_byte_length);
// Reallocate the backing store for a Wasm memory. Either readjust the
// size of the given backing store or allocate a new one and copy.
static std::unique_ptr<BackingStore> ReallocateWasmMemory(
std::unique_ptr<BackingStore> existing, size_t new_byte_length);
// Create a backing store that wraps existing allocated memory.
// If {free_on_destruct} is {true}, the memory will be freed using the
// ArrayBufferAllocator::Free() callback when this backing store is
// destructed. Otherwise destructing the backing store will do nothing
// to the allocated memory.
static std::unique_ptr<BackingStore> WrapAllocation(Isolate* isolate,
void* allocation_base,
size_t allocation_length,
SharedFlag shared,
bool free_on_destruct);
// Accessors.
void* buffer_start() const { return buffer_start_; }
size_t byte_length() const { return byte_length_; }
  size_t byte_capacity() const { return byte_capacity_; }
bool is_shared() const { return is_shared_; }
bool is_wasm_memory() const { return is_wasm_memory_; }
bool has_guard_regions() const { return has_guard_regions_; }
bool free_on_destruct() const { return free_on_destruct_; }
// Attempt to grow this backing store in place.
bool GrowWasmMemoryInPlace(Isolate* isolate, size_t new_byte_length);
// Attach the given memory object to this backing store. The memory object
// will be updated if this backing store is grown.
void AttachSharedWasmMemoryObject(Isolate* isolate,
Handle<WasmMemoryObject> memory_object);
// Send asynchronous updates to attached memory objects in other isolates
// after the backing store has been grown. Memory objects in this
// isolate are updated synchronously.
static void BroadcastSharedWasmMemoryGrow(Isolate* isolate,
std::shared_ptr<BackingStore>,
size_t new_size);
// TODO(wasm): address space limitations should be enforced in page alloc.
// These methods enforce a limit on the total amount of address space,
// which is used for both backing stores and wasm memory.
static bool ReserveAddressSpace(uint64_t num_bytes);
static void ReleaseReservation(uint64_t num_bytes);
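  // Pairing sketch: a successful reservation must eventually be matched by
  // a release of the same size, as in WasmCodeManager::TryAllocate and
  // WasmCodeManager::FreeNativeModule:
  //
  //   if (BackingStore::ReserveAddressSpace(size)) {
  //     // ... map and use the address space ...
  //     BackingStore::ReleaseReservation(size);
  //   }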
// Remove all memory objects in the given isolate that refer to this
// backing store.
static void RemoveSharedWasmMemoryObjects(Isolate* isolate);
// Update all shared memory objects in this isolate (after a grow operation).
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
private:
friend class GlobalBackingStoreRegistry;
BackingStore(void* buffer_start, size_t byte_length, size_t byte_capacity,
SharedFlag shared, bool is_wasm_memory, bool free_on_destruct,
bool has_guard_regions)
: buffer_start_(buffer_start),
byte_length_(byte_length),
byte_capacity_(byte_capacity),
is_shared_(shared == SharedFlag::kShared),
is_wasm_memory_(is_wasm_memory),
free_on_destruct_(free_on_destruct),
has_guard_regions_(has_guard_regions),
globally_registered_(false) {
type_specific_data_.v8_api_array_buffer_allocator = nullptr;
}
void* buffer_start_ = nullptr;
size_t byte_length_ = 0;
size_t byte_capacity_ = 0;
bool is_shared_ : 1;
bool is_wasm_memory_ : 1;
bool free_on_destruct_ : 1;
bool has_guard_regions_ : 1;
bool globally_registered_ : 1;
union {
// If this backing store was allocated through the ArrayBufferAllocator API,
// this is a direct pointer to the API object for freeing the backing
// store.
// Note: we use {void*} here because we cannot forward-declare an inner
// class from the API.
void* v8_api_array_buffer_allocator;
// For shared Wasm memories, this is a list of all the attached memory
// objects, which is needed to grow shared backing stores.
SharedWasmMemoryData* shared_wasm_memory_data;
} type_specific_data_;
// Accessors for type-specific data.
void* get_v8_api_array_buffer_allocator();
SharedWasmMemoryData* get_shared_wasm_memory_data();
void Clear(); // Internally clears fields after deallocation.
static std::unique_ptr<BackingStore> TryAllocateWasmMemory(
Isolate* isolate, size_t initial_pages, size_t maximum_pages,
SharedFlag shared);
DISALLOW_COPY_AND_ASSIGN(BackingStore);
};
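// End-to-end lifecycle sketch (assumed flow; |buffer| is a hypothetical
// freshly allocated Handle<JSArrayBuffer>): allocate a store, attach it,
// and let the shared_ptr held by the buffer keep it alive.
//
//   std::unique_ptr<BackingStore> bs = BackingStore::Allocate(
//       isolate, /*byte_length=*/1024, SharedFlag::kNotShared,
//       InitializedFlag::kZeroInitialized);
//   buffer->Attach(std::move(bs));  // JSArrayBuffer::Attach takes shared_ptr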
// A global, per-process mapping from buffer addresses to backing stores.
// This is generally only used for dealing with an embedder that has not
// yet migrated to the new API, which manages backing stores through proper
// (shared) pointers.
class GlobalBackingStoreRegistry {
public:
// Register a backing store in the global registry. A mapping from the
// {buffer_start} to the backing store object will be added. The backing
// store will automatically unregister itself upon destruction.
static void Register(std::shared_ptr<BackingStore> backing_store);
// Look up a backing store based on the {buffer_start} pointer.
static std::shared_ptr<BackingStore> Lookup(void* buffer_start,
size_t length);
private:
friend class BackingStore;
// Unregister a backing store in the global registry.
static void Unregister(BackingStore* backing_store);
// Adds the given memory object to the backing store's weak list
// of memory objects (under the registry lock).
static void AddSharedWasmMemoryObject(Isolate* isolate,
BackingStore* backing_store,
Handle<WasmMemoryObject> memory_object);
// Purge any shared wasm memory lists that refer to this isolate.
static void Purge(Isolate* isolate);
// Broadcast updates to all attached memory objects.
static void BroadcastSharedWasmMemoryGrow(
Isolate* isolate, std::shared_ptr<BackingStore> backing_store,
size_t new_size);
// Update all shared memory objects in the given isolate.
static void UpdateSharedWasmMemoryObjects(Isolate* isolate);
};
} // namespace internal
} // namespace v8
#endif // V8_OBJECTS_BACKING_STORE_H_


@ -48,6 +48,14 @@ size_t JSArrayBuffer::allocation_length() const {
if (backing_store() == nullptr) {
return 0;
}
// If this buffer is managed by the WasmMemoryTracker
if (is_wasm_memory()) {
const auto* data =
GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
backing_store());
DCHECK_NOT_NULL(data);
return data->allocation_length;
}
return byte_length();
}
@ -55,9 +63,25 @@ void* JSArrayBuffer::allocation_base() const {
if (backing_store() == nullptr) {
return nullptr;
}
// If this buffer is managed by the WasmMemoryTracker
if (is_wasm_memory()) {
const auto* data =
GetIsolate()->wasm_engine()->memory_tracker()->FindAllocationData(
backing_store());
DCHECK_NOT_NULL(data);
return data->allocation_base;
}
return backing_store();
}
bool JSArrayBuffer::is_wasm_memory() const {
return IsWasmMemoryBit::decode(bit_field());
}
void JSArrayBuffer::set_is_wasm_memory(bool is_wasm_memory) {
set_bit_field(IsWasmMemoryBit::update(bit_field(), is_wasm_memory));
}
void JSArrayBuffer::clear_padding() {
if (FIELD_SIZE(kOptionalPaddingOffset) != 0) {
DCHECK_EQ(4, FIELD_SIZE(kOptionalPaddingOffset));
@ -81,8 +105,6 @@ BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_detachable,
JSArrayBuffer::IsDetachableBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, was_detached,
JSArrayBuffer::WasDetachedBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_asmjs_memory,
JSArrayBuffer::IsAsmJsMemoryBit)
BIT_FIELD_ACCESSORS(JSArrayBuffer, bit_field, is_shared,
JSArrayBuffer::IsSharedBit)


@ -31,101 +31,167 @@ bool CanonicalNumericIndexString(Isolate* isolate, Handle<Object> s,
*index = result;
return true;
}
} // anonymous namespace
void JSArrayBuffer::SetupEmpty(SharedFlag shared) {
clear_padding();
set_bit_field(0);
set_is_shared(shared == SharedFlag::kShared);
set_is_detachable(shared != SharedFlag::kShared);
set_backing_store(nullptr);
set_byte_length(0);
inline int ConvertToMb(size_t size) {
return static_cast<int>(size / static_cast<size_t>(MB));
}
std::shared_ptr<BackingStore> JSArrayBuffer::Detach(
bool force_for_wasm_memory) {
if (was_detached()) return nullptr;
} // anonymous namespace
if (force_for_wasm_memory) {
// Skip the is_detachable() check.
} else if (!is_detachable()) {
// Not detachable, do nothing.
return nullptr;
}
Isolate* const isolate = GetIsolate();
auto backing_store = isolate->heap()->UnregisterBackingStore(*this);
CHECK_IMPLIES(force_for_wasm_memory && backing_store,
backing_store->is_wasm_memory());
if (isolate->IsArrayBufferDetachingIntact()) {
isolate->InvalidateArrayBufferDetachingProtector();
}
DCHECK(!is_shared());
DCHECK(!is_asmjs_memory());
void JSArrayBuffer::Detach() {
CHECK(is_detachable());
CHECK(!was_detached());
CHECK(is_external());
set_backing_store(nullptr);
set_byte_length(0);
set_was_detached(true);
return backing_store;
set_is_detachable(false);
// Invalidate the detaching protector.
Isolate* const isolate = GetIsolate();
if (isolate->IsArrayBufferDetachingIntact()) {
isolate->InvalidateArrayBufferDetachingProtector();
}
}
void JSArrayBuffer::Attach(std::shared_ptr<BackingStore> backing_store) {
SetupEmpty(backing_store->is_shared() ? SharedFlag::kShared
: SharedFlag::kNotShared);
if (backing_store->is_wasm_memory()) set_is_detachable(false);
set_backing_store(backing_store->buffer_start());
set_byte_length(backing_store->byte_length());
if (!backing_store->free_on_destruct()) set_is_external(true);
GetIsolate()->heap()->RegisterBackingStore(*this, backing_store);
void JSArrayBuffer::FreeBackingStoreFromMainThread() {
if (allocation_base() == nullptr) {
return;
}
FreeBackingStore(GetIsolate(), {allocation_base(), allocation_length(),
backing_store(), is_wasm_memory()});
// Zero out the backing store and allocation base to avoid dangling
// pointers.
set_backing_store(nullptr);
}
std::shared_ptr<BackingStore> JSArrayBuffer::GetBackingStore() {
return GetIsolate()->heap()->LookupBackingStore(*this);
// static
void JSArrayBuffer::FreeBackingStore(Isolate* isolate, Allocation allocation) {
if (allocation.is_wasm_memory) {
wasm::WasmMemoryTracker* memory_tracker =
isolate->wasm_engine()->memory_tracker();
memory_tracker->FreeWasmMemory(isolate, allocation.backing_store);
} else {
isolate->array_buffer_allocator()->Free(allocation.allocation_base,
allocation.length);
}
}
void JSArrayBuffer::Setup(Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
bool is_external, void* data, size_t byte_length,
SharedFlag shared_flag, bool is_wasm_memory) {
DCHECK_EQ(array_buffer->GetEmbedderFieldCount(),
v8::ArrayBuffer::kEmbedderFieldCount);
DCHECK_LE(byte_length, JSArrayBuffer::kMaxByteLength);
for (int i = 0; i < v8::ArrayBuffer::kEmbedderFieldCount; i++) {
array_buffer->SetEmbedderField(i, Smi::kZero);
}
array_buffer->set_byte_length(byte_length);
array_buffer->set_bit_field(0);
array_buffer->clear_padding();
array_buffer->set_is_external(is_external);
array_buffer->set_is_detachable(shared_flag == SharedFlag::kNotShared);
array_buffer->set_is_shared(shared_flag == SharedFlag::kShared);
array_buffer->set_is_wasm_memory(is_wasm_memory);
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
// already been promoted.
array_buffer->set_backing_store(data);
if (data && !is_external) {
isolate->heap()->RegisterNewArrayBuffer(*array_buffer);
}
}
void JSArrayBuffer::SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate) {
Setup(array_buffer, isolate, false, nullptr, 0, SharedFlag::kNotShared);
}
bool JSArrayBuffer::SetupAllocatingData(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate,
size_t allocated_length,
bool initialize,
SharedFlag shared_flag) {
void* data;
CHECK_NOT_NULL(isolate->array_buffer_allocator());
if (allocated_length != 0) {
if (allocated_length >= MB)
isolate->counters()->array_buffer_big_allocations()->AddSample(
ConvertToMb(allocated_length));
if (shared_flag == SharedFlag::kShared)
isolate->counters()->shared_array_allocations()->AddSample(
ConvertToMb(allocated_length));
if (initialize) {
data = isolate->array_buffer_allocator()->Allocate(allocated_length);
} else {
data = isolate->array_buffer_allocator()->AllocateUninitialized(
allocated_length);
}
if (data == nullptr) {
isolate->counters()->array_buffer_new_size_failures()->AddSample(
ConvertToMb(allocated_length));
SetupAsEmpty(array_buffer, isolate);
return false;
}
} else {
data = nullptr;
}
const bool is_external = false;
JSArrayBuffer::Setup(array_buffer, isolate, is_external, data,
allocated_length, shared_flag);
return true;
}
Handle<JSArrayBuffer> JSTypedArray::MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array) {
DCHECK(typed_array->is_on_heap());
Isolate* isolate = typed_array->GetIsolate();
DCHECK(IsTypedArrayElementsKind(typed_array->GetElementsKind()));
Handle<JSArrayBuffer> buffer(JSArrayBuffer::cast(typed_array->buffer()),
isolate);
// This code does not know how to materialize from wasm buffers.
DCHECK(!buffer->is_wasm_memory());
void* backing_store =
isolate->array_buffer_allocator()->AllocateUninitialized(
typed_array->byte_length());
if (backing_store == nullptr) {
isolate->heap()->FatalProcessOutOfMemory(
"JSTypedArray::MaterializeArrayBuffer");
}
buffer->set_is_external(false);
DCHECK_EQ(buffer->byte_length(), typed_array->byte_length());
// Initialize backing store at last to avoid handling of |JSArrayBuffers| that
// are currently being constructed in the |ArrayBufferTracker|. The
// registration method below handles the case of registering a buffer that has
// already been promoted.
buffer->set_backing_store(backing_store);
// RegisterNewArrayBuffer expects a valid length for adjusting counters.
isolate->heap()->RegisterNewArrayBuffer(*buffer);
memcpy(buffer->backing_store(), typed_array->DataPtr(),
typed_array->byte_length());
typed_array->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
typed_array->set_external_pointer(backing_store);
typed_array->set_base_pointer(Smi::kZero);
DCHECK(!typed_array->is_on_heap());
return buffer;
}
Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
Isolate* isolate = GetIsolate();
Handle<JSTypedArray> self(*this, isolate);
DCHECK(IsTypedArrayElementsKind(self->GetElementsKind()));
Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(self->buffer()),
isolate);
if (!is_on_heap()) {
// Already is off heap, so return the existing buffer.
Handle<JSArrayBuffer> array_buffer(JSArrayBuffer::cast(buffer()),
GetIsolate());
return array_buffer;
}
// The existing array buffer should be empty.
DCHECK_NULL(array_buffer->backing_store());
// Allocate a new backing store and attach it to the existing array buffer.
size_t byte_length = self->byte_length();
auto backing_store =
BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
InitializedFlag::kUninitialized);
if (!backing_store) {
isolate->heap()->FatalProcessOutOfMemory("JSTypedArray::GetBuffer");
}
// Copy the elements into the backing store of the array buffer.
memcpy(backing_store->buffer_start(), self->DataPtr(), byte_length);
// Attach the backing store to the array buffer.
array_buffer->Attach(std::move(backing_store));
// Clear the elements of the typed array.
self->set_elements(ReadOnlyRoots(isolate).empty_byte_array());
self->set_external_pointer(array_buffer->backing_store());
self->set_base_pointer(Smi::kZero);
DCHECK(!self->is_on_heap());
return array_buffer;
Handle<JSTypedArray> self(*this, GetIsolate());
return MaterializeArrayBuffer(self);
}
// ES#sec-integer-indexed-exotic-objects-defineownproperty-p-desc


@ -5,7 +5,6 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
// Has to be the last include (doesn't have include guards):
@ -14,6 +13,9 @@
namespace v8 {
namespace internal {
// Whether a JSArrayBuffer is a SharedArrayBuffer or not.
enum class SharedFlag : uint32_t { kNotShared, kShared };
class JSArrayBuffer : public JSObject {
public:
// The maximum length for JSArrayBuffer's supported by V8.
@ -49,8 +51,8 @@ class JSArrayBuffer : public JSObject {
V(IsExternalBit, bool, 1, _) \
V(IsDetachableBit, bool, 1, _) \
V(WasDetachedBit, bool, 1, _) \
V(IsAsmJsMemoryBit, bool, 1, _) \
V(IsSharedBit, bool, 1, _)
V(IsSharedBit, bool, 1, _) \
V(IsWasmMemoryBit, bool, 1, _)
DEFINE_BIT_FIELDS(JS_ARRAY_BUFFER_BIT_FIELD_FIELDS)
#undef JS_ARRAY_BUFFER_BIT_FIELD_FIELDS
@ -59,49 +61,57 @@ class JSArrayBuffer : public JSObject {
// memory block once all ArrayBuffers referencing it are collected by the GC.
DECL_BOOLEAN_ACCESSORS(is_external)
// [is_detachable]: false => this buffer cannot be detached.
// [is_detachable]: false indicates that this buffer cannot be detached.
DECL_BOOLEAN_ACCESSORS(is_detachable)
// [was_detached]: true => the buffer was previously detached.
// [was_detached]: true if the buffer was previously detached.
DECL_BOOLEAN_ACCESSORS(was_detached)
// [is_asmjs_memory]: true => this buffer was once used as asm.js memory.
DECL_BOOLEAN_ACCESSORS(is_asmjs_memory)
// [is_shared]: tells whether this is an ArrayBuffer or a SharedArrayBuffer.
DECL_BOOLEAN_ACCESSORS(is_shared)
// [is_wasm_memory]: whether the buffer is tracked by the WasmMemoryTracker.
DECL_BOOLEAN_ACCESSORS(is_wasm_memory)
DECL_CAST(JSArrayBuffer)
  // Immediately after creating an array buffer, the internal untagged fields
  // are garbage. They need to be initialized either with {SetupEmpty()} or
  // by attaching a backing store via {Attach()}.
void Detach();
// Setup an array buffer with no backing store.
V8_EXPORT_PRIVATE void SetupEmpty(SharedFlag shared);
struct Allocation {
Allocation(void* allocation_base, size_t length, void* backing_store,
bool is_wasm_memory)
: allocation_base(allocation_base),
length(length),
backing_store(backing_store),
is_wasm_memory(is_wasm_memory) {}
// Attach a backing store to this array buffer.
// (note: this registers it with src/heap/array-buffer-tracker.h)
V8_EXPORT_PRIVATE void Attach(std::shared_ptr<BackingStore> backing_store);
void* allocation_base;
size_t length;
void* backing_store;
bool is_wasm_memory;
};
// Detach the backing store from this array buffer if it is detachable
// and return a reference to the backing store object. This sets the
// internal pointer and length to 0 and unregisters the backing store
// from the array buffer tracker.
// If the array buffer is not detachable, this is a nop.
//
// Array buffers that wrap wasm memory objects are special in that they
// are normally not detachable, but can become detached as a side effect
// of growing the underlying memory object. The {force_for_wasm_memory} flag
// is used by the implementation of Wasm memory growth in order to bypass the
// non-detachable check.
V8_EXPORT_PRIVATE std::shared_ptr<BackingStore> Detach(
bool force_for_wasm_memory = false);
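  // Example (sketch): the Wasm growth path detaches the old buffer even
  // though wasm buffers are normally non-detachable; the returned reference
  // keeps the memory alive until a new buffer is attached:
  //
  //   std::shared_ptr<BackingStore> bs =
  //       old_buffer->Detach(/*force_for_wasm_memory=*/true);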
V8_EXPORT_PRIVATE void FreeBackingStoreFromMainThread();
V8_EXPORT_PRIVATE static void FreeBackingStore(Isolate* isolate,
Allocation allocation);
// Get a reference to backing store of this array buffer, if there is a
// backing store. Returns nullptr if there is no backing store (e.g. detached
// or a zero-length array buffer).
std::shared_ptr<BackingStore> GetBackingStore();
V8_EXPORT_PRIVATE static void Setup(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate, bool is_external,
void* data, size_t allocated_length,
SharedFlag shared_flag = SharedFlag::kNotShared,
bool is_wasm_memory = false);
// Initialize the object as empty one to avoid confusing heap verifier if
// the failure happened in the middle of JSArrayBuffer construction.
V8_EXPORT_PRIVATE static void SetupAsEmpty(Handle<JSArrayBuffer> array_buffer,
Isolate* isolate);
// Returns false if array buffer contents could not be allocated.
// In this case, |array_buffer| will not be set up.
V8_EXPORT_PRIVATE static bool SetupAllocatingData(
Handle<JSArrayBuffer> array_buffer, Isolate* isolate,
size_t allocated_length, bool initialize = true,
SharedFlag shared_flag = SharedFlag::kNotShared) V8_WARN_UNUSED_RESULT;
// Dispatched behavior.
DECL_PRINTER(JSArrayBuffer)
@ -129,8 +139,6 @@ class JSArrayBuffer : public JSObject {
class BodyDescriptor;
OBJECT_CONSTRUCTORS(JSArrayBuffer, JSObject);
std::shared_ptr<BackingStore> ForceDetach();
};
class JSArrayBufferView : public JSObject {
@ -242,6 +250,9 @@ class JSTypedArray : public JSArrayBufferView {
#endif
private:
static Handle<JSArrayBuffer> MaterializeArrayBuffer(
Handle<JSTypedArray> typed_array);
OBJECT_CONSTRUCTORS(JSTypedArray, JSArrayBufferView);
};


@ -923,8 +923,8 @@ Maybe<bool> ValueSerializer::WriteWasmMemory(Handle<WasmMemoryObject> object) {
return Nothing<bool>();
}
GlobalBackingStoreRegistry::Register(
object->array_buffer().GetBackingStore());
isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
object, isolate_);
WriteTag(SerializationTag::kWasmMemoryTransfer);
WriteZigZag<int32_t>(object->maximum_pages());
@ -1697,13 +1697,16 @@ MaybeHandle<JSArrayBuffer> ValueDeserializer::ReadJSArrayBuffer(
byte_length > static_cast<size_t>(end_ - position_)) {
return MaybeHandle<JSArrayBuffer>();
}
MaybeHandle<JSArrayBuffer> result =
isolate_->factory()->NewJSArrayBufferAndBackingStore(
byte_length, InitializedFlag::kUninitialized, allocation_);
Handle<JSArrayBuffer> array_buffer;
if (!result.ToHandle(&array_buffer)) return result;
memcpy(array_buffer->backing_store(), position_, byte_length);
const bool should_initialize = false;
Handle<JSArrayBuffer> array_buffer = isolate_->factory()->NewJSArrayBuffer(
SharedFlag::kNotShared, allocation_);
if (!JSArrayBuffer::SetupAllocatingData(array_buffer, isolate_, byte_length,
should_initialize)) {
return MaybeHandle<JSArrayBuffer>();
}
if (byte_length > 0) {
memcpy(array_buffer->backing_store(), position_, byte_length);
}
position_ += byte_length;
AddObjectWithID(id, array_buffer);
return array_buffer;
@ -1870,6 +1873,9 @@ MaybeHandle<WasmMemoryObject> ValueDeserializer::ReadWasmMemory() {
Handle<WasmMemoryObject> result =
WasmMemoryObject::New(isolate_, buffer, maximum_pages);
isolate_->wasm_engine()->memory_tracker()->RegisterWasmMemoryAsShared(
result, isolate_);
AddObjectWithID(id, result);
return result;
}


@ -1117,22 +1117,17 @@ RUNTIME_FUNCTION(Runtime_SerializeWasmModule) {
wasm::NativeModule* native_module = module_obj->native_module();
wasm::WasmSerializer wasm_serializer(native_module);
size_t byte_length = wasm_serializer.GetSerializedNativeModuleSize();
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
byte_length, InitializedFlag::kUninitialized);
Handle<JSArrayBuffer> array_buffer;
if (result.ToHandle(&array_buffer) &&
wasm_serializer.SerializeNativeModule(
{reinterpret_cast<uint8_t*>(array_buffer->backing_store()),
byte_length})) {
return *array_buffer;
size_t compiled_size = wasm_serializer.GetSerializedNativeModuleSize();
void* array_data = isolate->array_buffer_allocator()->Allocate(compiled_size);
Handle<JSArrayBuffer> array_buffer =
isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
JSArrayBuffer::Setup(array_buffer, isolate, false, array_data, compiled_size);
if (!array_data ||
!wasm_serializer.SerializeNativeModule(
{reinterpret_cast<uint8_t*>(array_data), compiled_size})) {
return ReadOnlyRoots(isolate).undefined_value();
}
// Error. Return undefined.
return ReadOnlyRoots(isolate).undefined_value();
return *array_buffer;
}
// Take an array buffer and attempt to reconstruct a compiled wasm module.


@ -27,7 +27,22 @@ RUNTIME_FUNCTION(Runtime_ArrayBufferDetach) {
isolate, NewTypeError(MessageTemplate::kNotTypedArray));
}
Handle<JSArrayBuffer> array_buffer = Handle<JSArrayBuffer>::cast(argument);
if (!array_buffer->is_detachable()) {
return ReadOnlyRoots(isolate).undefined_value();
}
if (array_buffer->backing_store() == nullptr) {
CHECK_EQ(0, array_buffer->byte_length());
return ReadOnlyRoots(isolate).undefined_value();
}
// Shared array buffers should never be detached.
CHECK(!array_buffer->is_shared());
DCHECK(!array_buffer->is_external());
void* backing_store = array_buffer->backing_store();
size_t byte_length = array_buffer->byte_length();
array_buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*array_buffer);
array_buffer->Detach();
isolate->array_buffer_allocator()->Free(backing_store, byte_length);
return ReadOnlyRoots(isolate).undefined_value();
}


@ -295,23 +295,19 @@ HeapObject Deserializer::PostProcessNewObject(HeapObject obj,
if (!typed_array.is_on_heap()) {
Smi store_index(
reinterpret_cast<Address>(typed_array.external_pointer()));
auto backing_store = backing_stores_[store_index.value()];
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
typed_array.set_external_pointer(start + typed_array.byte_offset());
byte* backing_store = off_heap_backing_stores_[store_index.value()] +
typed_array.byte_offset();
typed_array.set_external_pointer(backing_store);
}
} else if (obj.IsJSArrayBuffer()) {
JSArrayBuffer buffer = JSArrayBuffer::cast(obj);
// Only fixup for the off-heap case.
if (buffer.backing_store() != nullptr) {
Smi store_index(reinterpret_cast<Address>(buffer.backing_store()));
auto backing_store = backing_stores_[store_index.value()];
if (backing_store) {
buffer.Attach(backing_store);
} else {
buffer.SetupEmpty(SharedFlag::kNotShared);
}
void* backing_store = off_heap_backing_stores_[store_index.value()];
buffer.set_backing_store(backing_store);
isolate_->heap()->RegisterNewArrayBuffer(buffer);
}
} else if (obj.IsBytecodeArray()) {
// TODO(mythria): Remove these once we store the default values for these
@ -673,12 +669,12 @@ bool Deserializer::ReadData(TSlot current, TSlot limit,
case kOffHeapBackingStore: {
int byte_length = source_.GetInt();
std::unique_ptr<BackingStore> backing_store =
BackingStore::Allocate(isolate, byte_length, SharedFlag::kNotShared,
InitializedFlag::kUninitialized);
byte* backing_store = static_cast<byte*>(
isolate->array_buffer_allocator()->AllocateUninitialized(
byte_length));
CHECK_NOT_NULL(backing_store);
source_.CopyRaw(backing_store->buffer_start(), byte_length);
backing_stores_.push_back(std::move(backing_store));
source_.CopyRaw(backing_store, byte_length);
off_heap_backing_stores_.push_back(backing_store);
break;
}


@ -10,7 +10,6 @@
#include "src/objects/allocation-site.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/backing-store.h"
#include "src/objects/code.h"
#include "src/objects/js-array.h"
#include "src/objects/map.h"
@ -57,8 +56,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
allocator()->DecodeReservation(data->Reservations());
// We start the indices here at 1, so that we can distinguish between an
// actual index and a nullptr in a deserialized object requiring fix-up.
std::shared_ptr<BackingStore> backing_store;
backing_stores_.push_back(std::move(backing_store));
off_heap_backing_stores_.push_back(nullptr);
}
void Initialize(Isolate* isolate);
@ -175,7 +173,7 @@ class V8_EXPORT_PRIVATE Deserializer : public SerializerDeserializer {
std::vector<CallHandlerInfo> call_handler_infos_;
std::vector<Handle<String>> new_internalized_strings_;
std::vector<Handle<Script>> new_scripts_;
std::vector<std::shared_ptr<BackingStore>> backing_stores_;
std::vector<byte*> off_heap_backing_stores_;
DeserializerAllocator allocator_;
const bool deserializing_user_code_;


@ -2200,10 +2200,9 @@ auto Memory::make(Store* store_abs, const MemoryType* type) -> own<Memory*> {
if (maximum < minimum) return nullptr;
if (maximum > i::wasm::kSpecMaxWasmMemoryPages) return nullptr;
}
// TODO(wasm+): Support shared memory.
i::SharedFlag shared = i::SharedFlag::kNotShared;
bool is_shared = false; // TODO(wasm+): Support shared memory.
i::Handle<i::WasmMemoryObject> memory_obj;
if (!i::WasmMemoryObject::New(isolate, minimum, maximum, shared)
if (!i::WasmMemoryObject::New(isolate, minimum, maximum, is_shared)
.ToHandle(&memory_obj)) {
return own<Memory*>();
}


@ -31,6 +31,7 @@
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-js.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-result.h"


@ -93,7 +93,7 @@ class InstanceBuilder {
InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
MaybeHandle<JSArrayBuffer> memory_buffer);
MaybeHandle<JSArrayBuffer> memory);
// Build an instance, in all of its glory.
MaybeHandle<WasmInstanceObject> Build();
@ -114,8 +114,7 @@ class InstanceBuilder {
ErrorThrower* thrower_;
Handle<WasmModuleObject> module_object_;
MaybeHandle<JSReceiver> ffi_;
MaybeHandle<JSArrayBuffer> memory_buffer_;
Handle<WasmMemoryObject> memory_object_;
MaybeHandle<JSArrayBuffer> memory_;
Handle<JSArrayBuffer> untagged_globals_;
Handle<FixedArray> tagged_globals_;
std::vector<Handle<WasmExceptionObject>> exception_wrappers_;
@ -166,11 +165,9 @@ class InstanceBuilder {
void SanitizeImports();
// Find the imported memory if there is one.
bool FindImportedMemory();
// Allocate the memory.
bool AllocateMemory();
// Find the imported memory buffer if there is one. This is used to see if we
// need to recompile with bounds checks before creating the instance.
MaybeHandle<JSArrayBuffer> FindImportedMemoryBuffer() const;
// Processes a single imported function.
bool ProcessImportedFunction(Handle<WasmInstanceObject> instance,
@ -224,6 +221,9 @@ class InstanceBuilder {
// Process initialization of globals.
void InitGlobals(Handle<WasmInstanceObject> instance);
// Allocate memory for a module instance as a new JSArrayBuffer.
Handle<JSArrayBuffer> AllocateMemory(uint32_t initial_pages,
uint32_t maximum_pages);
bool NeedsWrappers() const;
@ -243,9 +243,8 @@ class InstanceBuilder {
MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object, MaybeHandle<JSReceiver> imports,
MaybeHandle<JSArrayBuffer> memory_buffer) {
InstanceBuilder builder(isolate, thrower, module_object, imports,
memory_buffer);
MaybeHandle<JSArrayBuffer> memory) {
InstanceBuilder builder(isolate, thrower, module_object, imports, memory);
auto instance = builder.Build();
if (!instance.is_null() && builder.ExecuteStartFunction()) {
return instance;
@ -257,14 +256,14 @@ MaybeHandle<WasmInstanceObject> InstantiateToInstanceObject(
InstanceBuilder::InstanceBuilder(Isolate* isolate, ErrorThrower* thrower,
Handle<WasmModuleObject> module_object,
MaybeHandle<JSReceiver> ffi,
MaybeHandle<JSArrayBuffer> memory_buffer)
MaybeHandle<JSArrayBuffer> memory)
: isolate_(isolate),
enabled_(module_object->native_module()->enabled_features()),
module_(module_object->module()),
thrower_(thrower),
module_object_(module_object),
ffi_(ffi),
memory_buffer_(memory_buffer) {
memory_(memory) {
sanitized_imports_.reserve(module_->import_table.size());
}
@ -290,7 +289,7 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
NativeModule* native_module = module_object_->native_module();
//--------------------------------------------------------------------------
// Set up the memory buffer and memory objects.
// Allocate the memory array buffer.
//--------------------------------------------------------------------------
uint32_t initial_pages = module_->initial_pages;
auto initial_pages_counter = SELECT_WASM_COUNTER(
@ -302,23 +301,31 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
isolate_->counters()->wasm_wasm_max_mem_pages_count();
max_pages_counter->AddSample(module_->maximum_pages);
}
// Asm.js has memory_ already set at this point, so we don't want to
// overwrite it.
if (memory_.is_null()) {
memory_ = FindImportedMemoryBuffer();
}
if (!memory_.is_null()) {
// Set externally passed ArrayBuffer non detachable.
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
memory->set_is_detachable(false);
if (memory_buffer_.is_null()) {
// Search for imported memory first.
if (!FindImportedMemory()) {
if (!AllocateMemory()) {
DCHECK(isolate_->has_pending_exception() || thrower_->error());
return {};
}
DCHECK_IMPLIES(native_module->use_trap_handler(),
module_->origin == kAsmJsOrigin ||
memory->is_wasm_memory() ||
memory->backing_store() == nullptr);
} else if (initial_pages > 0 || native_module->use_trap_handler()) {
// We need to unconditionally create a guard region if using trap handlers,
// even when the size is zero to prevent null-dereference issues
// (e.g. https://crbug.com/769637).
// Allocate memory if the initial size is more than 0 pages.
memory_ = AllocateMemory(initial_pages, module_->maximum_pages);
if (memory_.is_null()) {
// failed to allocate memory
DCHECK(isolate_->has_pending_exception() || thrower_->error());
return {};
}
} else {
// Asm.js has {memory_buffer_} already set at this point.
DCHECK_EQ(kAsmJsOrigin, module_->origin);
// asm.js instantiation should have set these flags.
DCHECK(!memory_buffer_.ToHandleChecked()->is_detachable());
DCHECK(memory_buffer_.ToHandleChecked()->is_asmjs_memory());
memory_object_ =
WasmMemoryObject::New(isolate_, memory_buffer_, kV8MaxWasmMemoryPages);
}
//--------------------------------------------------------------------------
@ -327,42 +334,33 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
TRACE("New module instantiation for %p\n", native_module);
Handle<WasmInstanceObject> instance =
WasmInstanceObject::New(isolate_, module_object_);
//--------------------------------------------------------------------------
// Attach the memory to the instance.
//--------------------------------------------------------------------------
if (module_->has_memory) {
DCHECK(!memory_object_.is_null());
if (!instance->has_memory_object()) {
instance->set_memory_object(*memory_object_);
}
// Add the instance object to the list of instances for this memory.
WasmMemoryObject::AddInstance(isolate_, memory_object_, instance);
// Double-check the {memory} array buffer matches the instance.
Handle<JSArrayBuffer> memory = memory_buffer_.ToHandleChecked();
CHECK_EQ(instance->memory_size(), memory->byte_length());
CHECK_EQ(instance->memory_start(), memory->backing_store());
}
NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Set up the globals for the new instance.
//--------------------------------------------------------------------------
uint32_t untagged_globals_buffer_size = module_->untagged_globals_buffer_size;
if (untagged_globals_buffer_size > 0) {
MaybeHandle<JSArrayBuffer> result =
isolate_->factory()->NewJSArrayBufferAndBackingStore(
untagged_globals_buffer_size, InitializedFlag::kZeroInitialized,
AllocationType::kOld);
if (!result.ToHandle(&untagged_globals_)) {
void* backing_store = isolate_->array_buffer_allocator()->Allocate(
untagged_globals_buffer_size);
if (backing_store == nullptr) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
untagged_globals_ = isolate_->factory()->NewJSArrayBuffer(
SharedFlag::kNotShared, AllocationType::kOld);
constexpr bool is_external = false;
constexpr bool is_wasm_memory = false;
JSArrayBuffer::Setup(untagged_globals_, isolate_, is_external,
backing_store, untagged_globals_buffer_size,
SharedFlag::kNotShared, is_wasm_memory);
if (untagged_globals_.is_null()) {
thrower_->RangeError("Out of memory: wasm globals");
return {};
}
instance->set_untagged_globals_buffer(*untagged_globals_);
instance->set_globals_start(
reinterpret_cast<byte*>(untagged_globals_->backing_store()));
instance->set_untagged_globals_buffer(*untagged_globals_);
}
uint32_t tagged_globals_buffer_size = module_->tagged_globals_buffer_size;
@ -428,8 +426,6 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
instance->set_indirect_function_tables(*tables);
}
NativeModuleModificationScope native_modification_scope(native_module);
//--------------------------------------------------------------------------
// Process the imports for the module.
//--------------------------------------------------------------------------
@ -455,6 +451,30 @@ MaybeHandle<WasmInstanceObject> InstanceBuilder::Build() {
InitializeExceptions(instance);
}
//--------------------------------------------------------------------------
// Create the WebAssembly.Memory object.
//--------------------------------------------------------------------------
if (module_->has_memory) {
if (!instance->has_memory_object()) {
// No memory object exists. Create one.
Handle<WasmMemoryObject> memory_object = WasmMemoryObject::New(
isolate_, memory_,
module_->maximum_pages != 0 ? module_->maximum_pages : -1);
instance->set_memory_object(*memory_object);
}
// Add the instance object to the list of instances for this memory.
Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate_);
WasmMemoryObject::AddInstance(isolate_, memory_object, instance);
if (!memory_.is_null()) {
// Double-check the {memory} array buffer matches the instance.
Handle<JSArrayBuffer> memory = memory_.ToHandleChecked();
CHECK_EQ(instance->memory_size(), memory->byte_length());
CHECK_EQ(instance->memory_start(), memory->backing_store());
}
}
// The bulk memory proposal changes the MVP behavior here; the segments are
// written as if `memory.init` and `table.init` are executed directly, and
// not bounds checked ahead of time.
@ -787,21 +807,22 @@ void InstanceBuilder::SanitizeImports() {
}
}
bool InstanceBuilder::FindImportedMemory() {
MaybeHandle<JSArrayBuffer> InstanceBuilder::FindImportedMemoryBuffer() const {
DCHECK_EQ(module_->import_table.size(), sanitized_imports_.size());
for (size_t index = 0; index < module_->import_table.size(); index++) {
WasmImport import = module_->import_table[index];
const WasmImport& import = module_->import_table[index];
if (import.kind == kExternalMemory) {
auto& value = sanitized_imports_[index].value;
if (!value->IsWasmMemoryObject()) return false;
memory_object_ = Handle<WasmMemoryObject>::cast(value);
memory_buffer_ =
Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
return true;
const auto& value = sanitized_imports_[index].value;
if (!value->IsWasmMemoryObject()) {
return {};
}
auto memory = Handle<WasmMemoryObject>::cast(value);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
return buffer;
}
}
return false;
return {};
}
bool InstanceBuilder::ProcessImportedFunction(
@ -988,19 +1009,19 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
Handle<String> module_name,
Handle<String> import_name,
Handle<Object> value) {
// Validation should have failed if more than one memory object was
// provided.
DCHECK(!instance->has_memory_object());
if (!value->IsWasmMemoryObject()) {
ReportLinkError("memory import must be a WebAssembly.Memory object",
import_index, module_name, import_name);
return false;
}
auto memory_object = Handle<WasmMemoryObject>::cast(value);
// The imported memory should have been already set up early.
CHECK_EQ(instance->memory_object(), *memory_object);
Handle<JSArrayBuffer> buffer(memory_object_->array_buffer(), isolate_);
auto memory = Handle<WasmMemoryObject>::cast(value);
instance->set_memory_object(*memory);
Handle<JSArrayBuffer> buffer(memory->array_buffer(), isolate_);
// memory_ should have already been assigned in Build().
DCHECK_EQ(*memory_buffer_.ToHandleChecked(), *buffer);
DCHECK_EQ(*memory_.ToHandleChecked(), *buffer);
uint32_t imported_cur_pages =
static_cast<uint32_t>(buffer->byte_length() / kWasmPageSize);
if (imported_cur_pages < module_->initial_pages) {
@ -1009,7 +1030,7 @@ bool InstanceBuilder::ProcessImportedMemory(Handle<WasmInstanceObject> instance,
imported_cur_pages);
return false;
}
int32_t imported_maximum_pages = memory_object_->maximum_pages();
int32_t imported_maximum_pages = memory->maximum_pages();
if (module_->has_maximum_pages) {
if (imported_maximum_pages < 0) {
thrower_->LinkError(
@ -1383,27 +1404,27 @@ void InstanceBuilder::InitGlobals(Handle<WasmInstanceObject> instance) {
}
// Allocate memory for a module instance as a new JSArrayBuffer.
bool InstanceBuilder::AllocateMemory() {
auto initial_pages = module_->initial_pages;
auto maximum_pages = module_->has_maximum_pages ? module_->maximum_pages : -1;
Handle<JSArrayBuffer> InstanceBuilder::AllocateMemory(uint32_t initial_pages,
uint32_t maximum_pages) {
if (initial_pages > max_mem_pages()) {
thrower_->RangeError("Out of memory: wasm memory too large");
return false;
return Handle<JSArrayBuffer>::null();
}
auto shared = (module_->has_shared_memory && enabled_.threads)
? SharedFlag::kShared
: SharedFlag::kNotShared;
MaybeHandle<WasmMemoryObject> result =
WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared);
if (!result.ToHandle(&memory_object_)) {
thrower_->RangeError("Out of memory: wasm memory");
return false;
const bool is_shared_memory = module_->has_shared_memory && enabled_.threads;
Handle<JSArrayBuffer> mem_buffer;
if (is_shared_memory) {
if (!NewSharedArrayBuffer(isolate_, initial_pages * kWasmPageSize,
maximum_pages * kWasmPageSize)
.ToHandle(&mem_buffer)) {
thrower_->RangeError("Out of memory: wasm shared memory");
}
} else {
if (!NewArrayBuffer(isolate_, initial_pages * kWasmPageSize)
.ToHandle(&mem_buffer)) {
thrower_->RangeError("Out of memory: wasm memory");
}
}
memory_buffer_ =
Handle<JSArrayBuffer>(memory_object_->array_buffer(), isolate_);
return true;
return mem_buffer;
}
bool InstanceBuilder::NeedsWrappers() const {


@ -1152,8 +1152,10 @@ NativeModule::~NativeModule() {
import_wrapper_cache_.reset();
}
WasmCodeManager::WasmCodeManager(size_t max_committed)
: max_committed_code_space_(max_committed),
WasmCodeManager::WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed)
: memory_tracker_(memory_tracker),
max_committed_code_space_(max_committed),
#if defined(V8_OS_WIN_X64)
is_win64_unwind_info_disabled_for_testing_(false),
#endif
@ -1230,12 +1232,12 @@ VirtualMemory WasmCodeManager::TryAllocate(size_t size, void* hint) {
DCHECK_GT(size, 0);
size_t allocate_page_size = page_allocator->AllocatePageSize();
size = RoundUp(size, allocate_page_size);
if (!BackingStore::ReserveAddressSpace(size)) return {};
if (!memory_tracker_->ReserveAddressSpace(size)) return {};
if (hint == nullptr) hint = page_allocator->GetRandomMmapAddr();
VirtualMemory mem(page_allocator, size, hint, allocate_page_size);
if (!mem.IsReserved()) {
BackingStore::ReleaseReservation(size);
memory_tracker_->ReleaseReservation(size);
return {};
}
TRACE_HEAP("VMem alloc: 0x%" PRIxPTR ":0x%" PRIxPTR " (%zu)\n", mem.address(),
@ -1462,7 +1464,7 @@ void WasmCodeManager::FreeNativeModule(Vector<VirtualMemory> owned_code_space,
#endif
lookup_map_.erase(code_space.address());
BackingStore::ReleaseReservation(code_space.size());
memory_tracker_->ReleaseReservation(code_space.size());
code_space.Free();
DCHECK(!code_space.IsReserved());
}


@ -38,6 +38,7 @@ class NativeModule;
class WasmCodeManager;
struct WasmCompilationResult;
class WasmEngine;
class WasmMemoryTracker;
class WasmImportWrapperCache;
struct WasmModule;
@ -595,7 +596,8 @@ class V8_EXPORT_PRIVATE NativeModule final {
class V8_EXPORT_PRIVATE WasmCodeManager final {
public:
explicit WasmCodeManager(size_t max_committed);
explicit WasmCodeManager(WasmMemoryTracker* memory_tracker,
size_t max_committed);
#ifdef DEBUG
~WasmCodeManager() {
@ -644,6 +646,8 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
void AssignRange(base::AddressRegion, NativeModule*);
WasmMemoryTracker* const memory_tracker_;
size_t max_committed_code_space_;
#if defined(V8_OS_WIN_X64)


@ -227,7 +227,8 @@ struct WasmEngine::NativeModuleInfo {
int8_t num_code_gcs_triggered = 0;
};
WasmEngine::WasmEngine() : code_manager_(FLAG_wasm_max_code_space * MB) {}
WasmEngine::WasmEngine()
: code_manager_(&memory_tracker_, FLAG_wasm_max_code_space * MB) {}
WasmEngine::~WasmEngine() {
// Synchronize on all background compile tasks.

View File

@ -10,6 +10,7 @@
#include "src/tasks/cancelable-task.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-tier.h"
#include "src/zone/accounting-allocator.h"
@ -22,7 +23,6 @@ class CompilationStatistics;
class HeapNumber;
class WasmInstanceObject;
class WasmModuleObject;
class JSArrayBuffer;
namespace wasm {
@ -120,6 +120,8 @@ class V8_EXPORT_PRIVATE WasmEngine {
WasmCodeManager* code_manager() { return &code_manager_; }
WasmMemoryTracker* memory_tracker() { return &memory_tracker_; }
AccountingAllocator* allocator() { return &allocator_; }
// Compilation statistics for TurboFan compilations.
@ -233,6 +235,7 @@ class V8_EXPORT_PRIVATE WasmEngine {
// calling this method.
void PotentiallyFinishCurrentGC();
WasmMemoryTracker memory_tracker_;
WasmCodeManager code_manager_;
AccountingAllocator allocator_;


@ -26,6 +26,7 @@
#include "src/wasm/streaming-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-serialization.h"
@ -1155,7 +1156,7 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
return;
}
auto shared = i::SharedFlag::kNotShared;
bool is_shared_memory = false;
auto enabled_features = i::wasm::WasmFeaturesFromIsolate(i_isolate);
if (enabled_features.threads) {
// Shared property of descriptor
@ -1164,11 +1165,10 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
descriptor->Get(context, shared_key);
v8::Local<v8::Value> value;
if (maybe_value.ToLocal(&value)) {
shared = value->BooleanValue(isolate) ? i::SharedFlag::kShared
: i::SharedFlag::kNotShared;
is_shared_memory = value->BooleanValue(isolate);
}
// Throw TypeError if shared is true, and the descriptor has no "maximum"
if (shared == i::SharedFlag::kShared && maximum == -1) {
if (is_shared_memory && maximum == -1) {
thrower.TypeError(
"If shared is true, maximum property should be defined.");
return;
@ -1177,12 +1177,13 @@ void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
i::Handle<i::JSObject> memory_obj;
if (!i::WasmMemoryObject::New(i_isolate, static_cast<uint32_t>(initial),
static_cast<uint32_t>(maximum), shared)
static_cast<uint32_t>(maximum),
is_shared_memory)
.ToHandle(&memory_obj)) {
thrower.RangeError("could not allocate memory");
return;
}
if (shared == i::SharedFlag::kShared) {
if (is_shared_memory) {
i::Handle<i::JSArrayBuffer> buffer(
i::Handle<i::WasmMemoryObject>::cast(memory_obj)->array_buffer(),
i_isolate);

src/wasm/wasm-memory.cc (new file, 633 lines)

@ -0,0 +1,633 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <limits>
#include "src/heap/heap-inl.h"
#include "src/logging/counters.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace {
constexpr size_t kNegativeGuardSize = 1u << 31; // 2GiB
void AddAllocationStatusSample(Isolate* isolate,
WasmMemoryTracker::AllocationStatus status) {
isolate->counters()->wasm_memory_allocation_result()->AddSample(
static_cast<int>(status));
}
bool RunWithGCAndRetry(const std::function<bool()>& fn, Heap* heap,
bool* did_retry) {
// Try up to three times; getting rid of dead JSArrayBuffer allocations might
// require two GCs because the first GC may be incremental and may have
// floating garbage.
static constexpr int kAllocationRetries = 2;
for (int trial = 0;; ++trial) {
if (fn()) return true;
// {fn} failed. If {kAllocationRetries} is reached, fail.
*did_retry = true;
if (trial == kAllocationRetries) return false;
// Otherwise, collect garbage and retry.
// TODO(wasm): Since reservation limits are engine-wide, we should do an
// engine-wide GC here (i.e. trigger a GC in each isolate using the engine,
// and wait for them all to finish). See https://crbug.com/v8/9405.
heap->MemoryPressureNotification(MemoryPressureLevel::kCritical, true);
}
}
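// A minimal usage sketch (illustrative only; {TryReserveStep} is a
// hypothetical stand-in for whatever fallible step is being retried):
//   bool did_retry = false;
//   if (!RunWithGCAndRetry([&] { return TryReserveStep(); }, heap,
//                          &did_retry)) {
//     // All retries failed; report the allocation failure to the caller.
//   }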
void* TryAllocateBackingStore(WasmMemoryTracker* memory_tracker, Heap* heap,
size_t size, size_t max_size,
void** allocation_base,
size_t* allocation_length) {
using AllocationStatus = WasmMemoryTracker::AllocationStatus;
#if V8_TARGET_ARCH_64_BIT
constexpr bool kRequireFullGuardRegions = true;
#else
constexpr bool kRequireFullGuardRegions = false;
#endif
// Let the WasmMemoryTracker know we are going to reserve a bunch of
// address space.
size_t reservation_size = std::max(max_size, size);
bool did_retry = false;
auto reserve_memory_space = [&] {
// For guard regions, we always allocate the largest possible offset
// into the heap, so the addressable memory after the guard page can
// be made inaccessible.
//
// To protect against 32-bit integer overflow issues, we also
// protect the 2GiB before the valid part of the memory buffer.
*allocation_length =
kRequireFullGuardRegions
? RoundUp(kWasmMaxHeapOffset + kNegativeGuardSize, CommitPageSize())
: RoundUp(base::bits::RoundUpToPowerOfTwo(reservation_size),
kWasmPageSize);
DCHECK_GE(*allocation_length, size);
DCHECK_GE(*allocation_length, kWasmPageSize);
return memory_tracker->ReserveAddressSpace(*allocation_length);
};
if (!RunWithGCAndRetry(reserve_memory_space, heap, &did_retry)) {
// Reset reservation_size to initial size so that at least the initial size
// can be allocated if maximum size reservation is not possible.
reservation_size = size;
// We are over the address space limit. Fail.
//
// When running under the correctness fuzzer (i.e.
// --correctness-fuzzer-suppressions is present), we crash
// instead so it is not incorrectly reported as a correctness
// violation. See https://crbug.com/828293#c4
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("could not allocate wasm memory");
}
AddAllocationStatusSample(
heap->isolate(), AllocationStatus::kAddressSpaceLimitReachedFailure);
return nullptr;
}
// The page reservation below makes the whole region inaccessible by default.
DCHECK_NULL(*allocation_base);
auto allocate_pages = [&] {
*allocation_base =
AllocatePages(GetPlatformPageAllocator(), nullptr, *allocation_length,
kWasmPageSize, PageAllocator::kNoAccess);
return *allocation_base != nullptr;
};
if (!RunWithGCAndRetry(allocate_pages, heap, &did_retry)) {
memory_tracker->ReleaseReservation(*allocation_length);
AddAllocationStatusSample(heap->isolate(), AllocationStatus::kOtherFailure);
return nullptr;
}
byte* memory = reinterpret_cast<byte*>(*allocation_base);
if (kRequireFullGuardRegions) {
memory += kNegativeGuardSize;
}
// Make the part we care about accessible.
auto commit_memory = [&] {
return size == 0 || SetPermissions(GetPlatformPageAllocator(), memory,
RoundUp(size, kWasmPageSize),
PageAllocator::kReadWrite);
};
// SetPermissions commits the extra memory, which may put us over the
// process memory limit. If so, report this as an OOM.
if (!RunWithGCAndRetry(commit_memory, heap, &did_retry)) {
V8::FatalProcessOutOfMemory(nullptr, "TryAllocateBackingStore");
}
memory_tracker->RegisterAllocation(heap->isolate(), *allocation_base,
*allocation_length, memory, size);
AddAllocationStatusSample(heap->isolate(),
did_retry ? AllocationStatus::kSuccessAfterRetry
: AllocationStatus::kSuccess);
return memory;
}
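// With full guard regions, the resulting allocation looks roughly like this
// (sketch, not to scale):
//
//   allocation_base          memory (= base + 2GiB)        memory + size
//   |--- 2GiB inaccessible ---|--- R/W wasm memory ---|--- inaccessible ---|
//
// Only [memory, memory + size) is committed read/write; the rest of the
// reservation stays inaccessible so that out-of-bounds accesses trap.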
#if V8_TARGET_ARCH_MIPS64
// MIPS64 has a user space of 2^40 bytes on most processors, so the
// address space limit needs to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
#elif V8_TARGET_ARCH_64_BIT
constexpr size_t kAddressSpaceLimit = 0x10100000000L; // 1 TiB + 4 GiB
#else
constexpr size_t kAddressSpaceLimit = 0xC0000000; // 3 GiB
#endif
} // namespace
WasmMemoryTracker::~WasmMemoryTracker() {
// All reserved address space should be released before the allocation tracker
// is destroyed.
DCHECK_EQ(reserved_address_space_, 0u);
DCHECK_EQ(allocated_address_space_, 0u);
DCHECK(allocations_.empty());
}
void* WasmMemoryTracker::TryAllocateBackingStoreForTesting(
Heap* heap, size_t size, void** allocation_base,
size_t* allocation_length) {
return TryAllocateBackingStore(this, heap, size, size, allocation_base,
allocation_length);
}
void WasmMemoryTracker::FreeBackingStoreForTesting(base::AddressRegion memory,
void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
ReleaseAllocation_Locked(nullptr, buffer_start);
CHECK(FreePages(GetPlatformPageAllocator(),
reinterpret_cast<void*>(memory.begin()), memory.size()));
}
bool WasmMemoryTracker::ReserveAddressSpace(size_t num_bytes) {
size_t reservation_limit = kAddressSpaceLimit;
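// Note: compare_exchange_weak may fail spuriously, so loop until the
// counter is successfully updated or one of the limit checks fails.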
while (true) {
size_t old_count = reserved_address_space_.load();
if (old_count > reservation_limit) return false;
if (reservation_limit - old_count < num_bytes) return false;
if (reserved_address_space_.compare_exchange_weak(old_count,
old_count + num_bytes)) {
return true;
}
}
}
void WasmMemoryTracker::ReleaseReservation(size_t num_bytes) {
size_t const old_reserved = reserved_address_space_.fetch_sub(num_bytes);
USE(old_reserved);
DCHECK_LE(num_bytes, old_reserved);
}
void WasmMemoryTracker::RegisterAllocation(Isolate* isolate,
void* allocation_base,
size_t allocation_length,
void* buffer_start,
size_t buffer_length) {
base::MutexGuard scope_lock(&mutex_);
allocated_address_space_ += allocation_length;
// Report address space usage in MiB so the full range fits in an int on all
// platforms.
isolate->counters()->wasm_address_space_usage_mb()->AddSample(
static_cast<int>(allocated_address_space_ / MB));
allocations_.emplace(buffer_start,
AllocationData{allocation_base, allocation_length,
buffer_start, buffer_length});
}
WasmMemoryTracker::AllocationData WasmMemoryTracker::ReleaseAllocation_Locked(
Isolate* isolate, const void* buffer_start) {
auto find_result = allocations_.find(buffer_start);
CHECK_NE(find_result, allocations_.end());
size_t num_bytes = find_result->second.allocation_length;
DCHECK_LE(num_bytes, reserved_address_space_);
DCHECK_LE(num_bytes, allocated_address_space_);
reserved_address_space_ -= num_bytes;
allocated_address_space_ -= num_bytes;
AllocationData allocation_data = find_result->second;
allocations_.erase(find_result);
return allocation_data;
}
const WasmMemoryTracker::AllocationData* WasmMemoryTracker::FindAllocationData(
const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
if (result != allocations_.end()) {
return &result->second;
}
return nullptr;
}
bool WasmMemoryTracker::IsWasmMemory(const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
return allocations_.find(buffer_start) != allocations_.end();
}
bool WasmMemoryTracker::IsWasmSharedMemory(const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
// Should be a wasm allocation, and registered as a shared allocation.
return (result != allocations_.end() && result->second.is_shared);
}
void WasmMemoryTracker::MarkWasmMemoryNotGrowable(
Handle<JSArrayBuffer> buffer) {
base::MutexGuard scope_lock(&mutex_);
const auto& allocation = allocations_.find(buffer->backing_store());
if (allocation == allocations_.end()) return;
allocation->second.is_growable = false;
}
bool WasmMemoryTracker::IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer) {
base::MutexGuard scope_lock(&mutex_);
if (buffer->backing_store() == nullptr) return true;
const auto& allocation = allocations_.find(buffer->backing_store());
if (allocation == allocations_.end()) return false;
return allocation->second.is_growable;
}
bool WasmMemoryTracker::FreeWasmMemory(Isolate* isolate,
const void* buffer_start) {
base::MutexGuard scope_lock(&mutex_);
const auto& result = allocations_.find(buffer_start);
if (result == allocations_.end()) return false;
if (result->second.is_shared) {
// This is a shared WebAssembly.Memory allocation
FreeMemoryIfNotShared_Locked(isolate, buffer_start);
return true;
}
// This is a non-shared WebAssembly.Memory allocation
const AllocationData allocation =
ReleaseAllocation_Locked(isolate, buffer_start);
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.allocation_length));
return true;
}
void WasmMemoryTracker::RegisterWasmMemoryAsShared(
Handle<WasmMemoryObject> object, Isolate* isolate) {
// Only register with the tracker if shared grow is enabled.
if (!FLAG_wasm_grow_shared_memory) return;
const void* backing_store = object->array_buffer().backing_store();
// TODO(V8:8810): This should be a DCHECK; currently some tests do not
// use a full WebAssembly.Memory and fail on registration, so return early.
if (!IsWasmMemory(backing_store)) return;
{
base::MutexGuard scope_lock(&mutex_);
// Register the allocation as shared when it is post-messaged. This happens
// only the first time a buffer is shared over PostMessage; also track all
// the memory objects that are associated with this backing store.
RegisterSharedWasmMemory_Locked(object, isolate);
// Add isolate to backing store mapping.
isolates_per_buffer_[backing_store].emplace(isolate);
}
}
void WasmMemoryTracker::SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
base::MutexGuard scope_lock(&mutex_);
// Keep track of the new size of the buffer associated with each backing
// store.
AddBufferToGrowMap_Locked(old_buffer, new_size);
// Request a GROW_SHARED_MEMORY interrupt on the other isolates
TriggerSharedGrowInterruptOnAllIsolates_Locked(old_buffer);
}
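// Overall shared-grow flow (summary): the growing isolate records the pending
// size in {grow_update_map_} and requests a GROW_SHARED_MEMORY interrupt on
// every isolate sharing the buffer; each of those isolates then calls
// {UpdateSharedMemoryInstances} from its stack check to publish the new size
// to its memory objects.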
void WasmMemoryTracker::UpdateSharedMemoryInstances(Isolate* isolate) {
base::MutexGuard scope_lock(&mutex_);
// For every buffer in the grow_update_map_, update the size for all the
// memory objects associated with this isolate.
for (auto it = grow_update_map_.begin(); it != grow_update_map_.end();) {
UpdateSharedMemoryStateOnInterrupt_Locked(isolate, it->first, it->second);
// If all the isolates that share this buffer have hit a stack check, their
// memory objects are updated, and this grow entry can be erased.
if (AreAllIsolatesUpdated_Locked(it->first)) {
it = grow_update_map_.erase(it);
} else {
it++;
}
}
}
void WasmMemoryTracker::RegisterSharedWasmMemory_Locked(
Handle<WasmMemoryObject> object, Isolate* isolate) {
DCHECK(object->array_buffer().is_shared());
void* backing_store = object->array_buffer().backing_store();
// The allocation of a WasmMemoryObject should always be registered with the
// WasmMemoryTracker.
const auto& result = allocations_.find(backing_store);
if (result == allocations_.end()) return;
// Register the allocation as shared, if not already marked as shared.
if (!result->second.is_shared) result->second.is_shared = true;
// Create persistent global handles for the memory objects that are shared.
GlobalHandles* global_handles = isolate->global_handles();
object = global_handles->Create(*object);
// Add to memory_object_vector to track the memory objects and instance
// objects that will need to be updated on a Grow call.
result->second.memory_object_vector.push_back(
SharedMemoryObjectState(object, isolate));
}
void WasmMemoryTracker::AddBufferToGrowMap_Locked(
Handle<JSArrayBuffer> old_buffer, size_t new_size) {
void* backing_store = old_buffer->backing_store();
auto entry = grow_update_map_.find(old_buffer->backing_store());
if (entry == grow_update_map_.end()) {
// No pending grow for this backing store, add to map.
grow_update_map_.emplace(backing_store, new_size);
return;
}
// If a grow on the same buffer is requested before the update is complete,
// the new_size should always be greater than or equal to the old size. They
// are equal when grow(0) is called, but new buffer handles are still
// mandated by the spec.
CHECK_LE(entry->second, new_size);
entry->second = new_size;
// Flush the updated-instances set every time a new grow size is recorded.
ClearUpdatedInstancesOnPendingGrow_Locked(backing_store);
}
void WasmMemoryTracker::TriggerSharedGrowInterruptOnAllIsolates_Locked(
Handle<JSArrayBuffer> old_buffer) {
// Request a GrowSharedMemory interrupt on all the isolates that share
// the backing store.
const auto& isolates = isolates_per_buffer_.find(old_buffer->backing_store());
for (const auto& isolate : isolates->second) {
isolate->stack_guard()->RequestGrowSharedMemory();
}
}
void WasmMemoryTracker::UpdateSharedMemoryStateOnInterrupt_Locked(
Isolate* isolate, void* backing_store, size_t new_size) {
// Update objects only if there are memory objects that share this backing
// store, and this isolate is marked as one of the isolates that shares this
// buffer.
if (MemoryObjectsNeedUpdate_Locked(isolate, backing_store)) {
UpdateMemoryObjectsForIsolate_Locked(isolate, backing_store, new_size);
// As the memory objects are updated, add this isolate to a set of isolates
// that are updated on grow. This state is maintained to track if all the
// isolates that share the backing store have hit a StackCheck.
isolates_updated_on_grow_[backing_store].emplace(isolate);
}
}
bool WasmMemoryTracker::AreAllIsolatesUpdated_Locked(
const void* backing_store) {
const auto& buffer_isolates = isolates_per_buffer_.find(backing_store);
// No isolates share this buffer.
if (buffer_isolates == isolates_per_buffer_.end()) return true;
const auto& updated_isolates = isolates_updated_on_grow_.find(backing_store);
// Some isolates share the buffer, but no isolates have been updated yet.
if (updated_isolates == isolates_updated_on_grow_.end()) return false;
if (buffer_isolates->second == updated_isolates->second) {
// If all the isolates that share this backing_store have hit a stack check,
// and the memory objects have been updated, remove the entry from the
// updatemap, and return true.
isolates_updated_on_grow_.erase(backing_store);
return true;
}
return false;
}
void WasmMemoryTracker::ClearUpdatedInstancesOnPendingGrow_Locked(
const void* backing_store) {
// On multiple grows to the same buffer, the entries for that buffer should be
// flushed. This is done so that any consecutive grows to the same buffer will
// update all instances that share this buffer.
const auto& value = isolates_updated_on_grow_.find(backing_store);
if (value != isolates_updated_on_grow_.end()) {
value->second.clear();
}
}
void WasmMemoryTracker::UpdateMemoryObjectsForIsolate_Locked(
Isolate* isolate, void* backing_store, size_t new_size) {
const auto& result = allocations_.find(backing_store);
if (result == allocations_.end() || !result->second.is_shared) return;
for (const auto& memory_obj_state : result->second.memory_object_vector) {
DCHECK_NE(memory_obj_state.isolate, nullptr);
if (isolate == memory_obj_state.isolate) {
HandleScope scope(isolate);
Handle<WasmMemoryObject> memory_object = memory_obj_state.memory_object;
DCHECK(memory_object->IsWasmMemoryObject());
DCHECK(memory_object->array_buffer().is_shared());
// Permissions were already adjusted, but create a new buffer with the new
// size and the old attributes. The memory has already been allocated, so
// just create a new buffer with the same backing store.
bool is_external = memory_object->array_buffer().is_external();
Handle<JSArrayBuffer> new_buffer = SetupArrayBuffer(
isolate, backing_store, new_size, is_external, SharedFlag::kShared);
memory_obj_state.memory_object->update_instances(isolate, new_buffer);
}
}
}
bool WasmMemoryTracker::MemoryObjectsNeedUpdate_Locked(
Isolate* isolate, const void* backing_store) {
// Return true if this buffer has memory_objects it needs to update.
const auto& result = allocations_.find(backing_store);
if (result == allocations_.end() || !result->second.is_shared) return false;
// Only update if the buffer has memory objects that need to be updated.
if (result->second.memory_object_vector.empty()) return false;
const auto& isolate_entry = isolates_per_buffer_.find(backing_store);
return (isolate_entry != isolates_per_buffer_.end() &&
isolate_entry->second.count(isolate) != 0);
}
void WasmMemoryTracker::FreeMemoryIfNotShared_Locked(
Isolate* isolate, const void* backing_store) {
RemoveSharedBufferState_Locked(isolate, backing_store);
if (CanFreeSharedMemory_Locked(backing_store)) {
const AllocationData allocation =
ReleaseAllocation_Locked(isolate, backing_store);
CHECK(FreePages(GetPlatformPageAllocator(), allocation.allocation_base,
allocation.allocation_length));
}
}
bool WasmMemoryTracker::CanFreeSharedMemory_Locked(const void* backing_store) {
const auto& value = isolates_per_buffer_.find(backing_store);
// If no isolates share this buffer, the backing store can be freed.
// Erase the buffer entry.
if (value == isolates_per_buffer_.end() || value->second.empty()) return true;
return false;
}
void WasmMemoryTracker::RemoveSharedBufferState_Locked(
Isolate* isolate, const void* backing_store) {
if (isolate != nullptr) {
DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
RemoveIsolateFromBackingStore_Locked(isolate, backing_store);
} else {
// This happens for externalized contents; clean up the shared memory state
// associated with this buffer across isolates.
DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(backing_store);
}
}
void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
const void* backing_store) {
const auto& result = allocations_.find(backing_store);
CHECK(result != allocations_.end() && result->second.is_shared);
auto& object_vector = result->second.memory_object_vector;
if (object_vector.empty()) return;
for (const auto& mem_obj_state : object_vector) {
GlobalHandles::Destroy(mem_obj_state.memory_object.location());
}
object_vector.clear();
// Remove isolate from backing store map.
isolates_per_buffer_.erase(backing_store);
}
void WasmMemoryTracker::DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
Isolate* isolate, const void* backing_store) {
// This gets called when an internal handle to the ArrayBuffer should be
// freed. On heap teardown for that isolate, remove the memory objects
// that are associated with this buffer and isolate.
const auto& result = allocations_.find(backing_store);
CHECK(result != allocations_.end() && result->second.is_shared);
auto& object_vector = result->second.memory_object_vector;
if (object_vector.empty()) return;
for (auto it = object_vector.begin(); it != object_vector.end();) {
if (isolate == it->isolate) {
GlobalHandles::Destroy(it->memory_object.location());
it = object_vector.erase(it);
} else {
++it;
}
}
}
void WasmMemoryTracker::RemoveIsolateFromBackingStore_Locked(
Isolate* isolate, const void* backing_store) {
const auto& isolates = isolates_per_buffer_.find(backing_store);
if (isolates == isolates_per_buffer_.end() || isolates->second.empty())
return;
isolates->second.erase(isolate);
}
void WasmMemoryTracker::DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate) {
base::MutexGuard scope_lock(&mutex_);
// This is possible for buffers that are externalized and whose handles have
// been freed: the backing store wasn't released because the externalized
// contents were still using it.
if (isolates_per_buffer_.empty()) return;
for (auto& entry : isolates_per_buffer_) {
if (entry.second.find(isolate) == entry.second.end()) continue;
const void* backing_store = entry.first;
entry.second.erase(isolate);
DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(isolate, backing_store);
}
for (auto& buffer_isolates : isolates_updated_on_grow_) {
auto& isolates = buffer_isolates.second;
isolates.erase(isolate);
}
}
Handle<JSArrayBuffer> SetupArrayBuffer(Isolate* isolate, void* backing_store,
size_t size, bool is_external,
SharedFlag shared) {
Handle<JSArrayBuffer> buffer =
isolate->factory()->NewJSArrayBuffer(shared, AllocationType::kOld);
constexpr bool is_wasm_memory = true;
JSArrayBuffer::Setup(buffer, isolate, is_external, backing_store, size,
shared, is_wasm_memory);
buffer->set_is_detachable(false);
return buffer;
}
MaybeHandle<JSArrayBuffer> AllocateAndSetupArrayBuffer(Isolate* isolate,
size_t size,
size_t maximum_size,
SharedFlag shared) {
// Enforce flag-limited maximum allocation size.
if (size > max_mem_bytes()) return {};
WasmMemoryTracker* memory_tracker = isolate->wasm_engine()->memory_tracker();
// Set by TryAllocateBackingStore.
void* allocation_base = nullptr;
size_t allocation_length = 0;
void* memory = TryAllocateBackingStore(memory_tracker, isolate->heap(), size,
maximum_size, &allocation_base,
&allocation_length);
if (memory == nullptr) return {};
#if DEBUG
// Double check the API allocator actually zero-initialized the memory.
const byte* bytes = reinterpret_cast<const byte*>(memory);
for (size_t i = 0; i < size; ++i) {
DCHECK_EQ(0, bytes[i]);
}
#endif
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(size);
constexpr bool is_external = false;
return SetupArrayBuffer(isolate, memory, size, is_external, shared);
}
MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate* isolate, size_t size) {
return AllocateAndSetupArrayBuffer(isolate, size, size,
SharedFlag::kNotShared);
}
MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(Isolate* isolate,
size_t initial_size,
size_t max_size) {
return AllocateAndSetupArrayBuffer(isolate, initial_size, max_size,
SharedFlag::kShared);
}
void DetachMemoryBuffer(Isolate* isolate, Handle<JSArrayBuffer> buffer,
bool free_memory) {
if (buffer->is_shared()) return; // Detaching shared buffers is impossible.
DCHECK(!buffer->is_detachable());
const bool is_external = buffer->is_external();
if (!is_external) {
buffer->set_is_external(true);
isolate->heap()->UnregisterArrayBuffer(*buffer);
if (free_memory) {
// We need to free the memory before detaching the buffer because
// FreeBackingStore reads buffer->allocation_base(), which is nulled out
// by Detach. This means there is a dangling pointer until we detach the
// buffer. Since there is no way for the user to directly call
// FreeBackingStore, we can ensure this is safe.
buffer->FreeBackingStoreFromMainThread();
}
}
DCHECK(buffer->is_external());
buffer->set_is_wasm_memory(false);
buffer->set_is_detachable(true);
buffer->Detach();
}
} // namespace wasm
} // namespace internal
} // namespace v8

src/wasm/wasm-memory.h (new file, 289 lines)

@ -0,0 +1,289 @@
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_WASM_WASM_MEMORY_H_
#define V8_WASM_WASM_MEMORY_H_
#include <atomic>
#include <unordered_map>
#include <unordered_set>
#include "src/base/platform/mutex.h"
#include "src/flags/flags.h"
#include "src/handles/handles.h"
#include "src/objects/js-array-buffer.h"
namespace v8 {
namespace internal {
namespace wasm {
// The {WasmMemoryTracker} tracks reservations and allocations for wasm memory
// and wasm code. There is an upper limit on the total reserved memory which is
// checked by this class. Allocations are stored so we can look them up when an
// array buffer dies and figure out the reservation and allocation bounds for
// that buffer.
class WasmMemoryTracker {
public:
WasmMemoryTracker() = default;
V8_EXPORT_PRIVATE ~WasmMemoryTracker();
// ReserveAddressSpace attempts to increase the reserved address space counter
// by {num_bytes}. Returns true if successful (meaning it is okay to go ahead
// and reserve {num_bytes} bytes), false otherwise.
bool ReserveAddressSpace(size_t num_bytes);
void RegisterAllocation(Isolate* isolate, void* allocation_base,
size_t allocation_length, void* buffer_start,
size_t buffer_length);
struct SharedMemoryObjectState {
Handle<WasmMemoryObject> memory_object;
Isolate* isolate;
SharedMemoryObjectState() = default;
SharedMemoryObjectState(Handle<WasmMemoryObject> memory_object,
Isolate* isolate)
: memory_object(memory_object), isolate(isolate) {}
};
struct AllocationData {
void* allocation_base = nullptr;
size_t allocation_length = 0;
void* buffer_start = nullptr;
size_t buffer_length = 0;
bool is_shared = false;
// Wasm memories are growable by default; this will be false only when
// shared with an asm.js module.
bool is_growable = true;
// Track wasm memory instances across isolates; this is populated on
// PostMessage using persistent handles for memory objects.
std::vector<WasmMemoryTracker::SharedMemoryObjectState>
memory_object_vector;
private:
AllocationData() = default;
AllocationData(void* allocation_base, size_t allocation_length,
void* buffer_start, size_t buffer_length)
: allocation_base(allocation_base),
allocation_length(allocation_length),
buffer_start(buffer_start),
buffer_length(buffer_length) {
DCHECK_LE(reinterpret_cast<uintptr_t>(allocation_base),
reinterpret_cast<uintptr_t>(buffer_start));
DCHECK_GE(
reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
reinterpret_cast<uintptr_t>(buffer_start));
DCHECK_GE(
reinterpret_cast<uintptr_t>(allocation_base) + allocation_length,
reinterpret_cast<uintptr_t>(buffer_start) + buffer_length);
}
friend WasmMemoryTracker;
};
// Allow tests to allocate a backing store the same way as we do it for
// WebAssembly memory. This is used in unit tests for trap handler to
// generate the same signals/exceptions for invalid memory accesses as
// we would get with WebAssembly memory.
V8_EXPORT_PRIVATE void* TryAllocateBackingStoreForTesting(
Heap* heap, size_t size, void** allocation_base,
size_t* allocation_length);
// Free memory allocated with TryAllocateBackingStoreForTesting.
V8_EXPORT_PRIVATE void FreeBackingStoreForTesting(base::AddressRegion memory,
void* buffer_start);
// Decreases the amount of reserved address space.
void ReleaseReservation(size_t num_bytes);
V8_EXPORT_PRIVATE bool IsWasmMemory(const void* buffer_start);
bool IsWasmSharedMemory(const void* buffer_start);
// Returns a pointer to a Wasm buffer's allocation data, or nullptr if the
// buffer is not tracked.
V8_EXPORT_PRIVATE const AllocationData* FindAllocationData(
const void* buffer_start);
// Free memory allocated by the wasm memory tracker.
bool FreeWasmMemory(Isolate* isolate, const void* buffer_start);
void MarkWasmMemoryNotGrowable(Handle<JSArrayBuffer> buffer);
bool IsWasmMemoryGrowable(Handle<JSArrayBuffer> buffer);
// When WebAssembly.Memory is transferred over PostMessage, register the
// allocation as shared and track the memory objects that will need
// updating if memory is resized.
void RegisterWasmMemoryAsShared(Handle<WasmMemoryObject> object,
Isolate* isolate);
// This method is called when the underlying backing store is grown, but
// instances that share the backing_store have not yet been updated.
void SetPendingUpdateOnGrow(Handle<JSArrayBuffer> old_buffer,
size_t new_size);
// Interrupt handler for GROW_SHARED_MEMORY interrupt. Update memory objects
// and instances that share the memory objects after a Grow call.
void UpdateSharedMemoryInstances(Isolate* isolate);
// Due to the timing of when buffers are garbage collected versus when the
// isolate's object handles are destroyed, it is possible to leak global
// handles. To avoid this, clean up any remaining global handles on isolate
// destruction.
void DeleteSharedMemoryObjectsOnIsolate(Isolate* isolate);
// Allocation results are reported to UMA
//
// See wasm_memory_allocation_result in counters.h
enum class AllocationStatus {
kSuccess, // Succeeded on the first try
kSuccessAfterRetry, // Succeeded after garbage collection
kAddressSpaceLimitReachedFailure, // Failed because Wasm is at its address
// space limit
kOtherFailure // Failed for an unknown reason
};
private:
// Helper methods to free memory only if it is not shared by other isolates
// or memory objects.
void FreeMemoryIfNotShared_Locked(Isolate* isolate,
const void* backing_store);
bool CanFreeSharedMemory_Locked(const void* backing_store);
void RemoveSharedBufferState_Locked(Isolate* isolate,
const void* backing_store);
// Registers the allocation as shared, and tracks all the memory objects
// associated with this allocation across isolates.
void RegisterSharedWasmMemory_Locked(Handle<WasmMemoryObject> object,
Isolate* isolate);
// Map the new size after grow to the buffer backing store, so that instances
// and memory objects that share the WebAssembly.Memory across isolates can
// be updated.
void AddBufferToGrowMap_Locked(Handle<JSArrayBuffer> old_buffer,
size_t new_size);
// Trigger a GROW_SHARED_MEMORY interrupt on all the isolates that have memory
// objects that share this buffer.
void TriggerSharedGrowInterruptOnAllIsolates_Locked(
Handle<JSArrayBuffer> old_buffer);
// When isolates hit a stack check, update the memory objects associated with
// that isolate.
void UpdateSharedMemoryStateOnInterrupt_Locked(Isolate* isolate,
void* backing_store,
size_t new_size);
// Check if all the isolates that share a backing_store have hit a stack
// check. If a stack check is hit, and the backing store is pending grow,
// this isolate will have updated memory objects.
bool AreAllIsolatesUpdated_Locked(const void* backing_store);
// If a grow call is made to a buffer with a pending grow, and all the
// isolates that share this buffer have not hit a StackCheck, clear the set of
// already updated instances so they can be updated with the new size on the
// most recent grow call.
void ClearUpdatedInstancesOnPendingGrow_Locked(const void* backing_store);
// Helper functions to update memory objects on grow, and maintain state for
// which isolates hit a stack check.
void UpdateMemoryObjectsForIsolate_Locked(Isolate* isolate,
void* backing_store,
size_t new_size);
bool MemoryObjectsNeedUpdate_Locked(Isolate* isolate,
const void* backing_store);
// Destroy global handles to memory objects, and remove backing store from
// isolates_per_buffer on Free.
void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
Isolate* isolate, const void* backing_store);
void DestroyMemoryObjectsAndRemoveIsolateEntry_Locked(
const void* backing_store);
void RemoveIsolateFromBackingStore_Locked(Isolate* isolate,
const void* backing_store);
// Removes an allocation from the tracker.
AllocationData ReleaseAllocation_Locked(Isolate* isolate,
const void* buffer_start);
// Clients use a two-part process. First they "reserve" the address space,
// which signifies an intent to actually allocate it. This determines whether
// doing the allocation would put us over our limit. Once there is a
// reservation, clients can do the allocation and register the result.
//
// We should always have:
// allocated_address_space_ <= reserved_address_space_ <= kAddressSpaceLimit
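// Typical sequence (sketch): ReserveAddressSpace() -> AllocatePages() ->
// RegisterAllocation(); on free, ReleaseAllocation_Locked() drops both
// counters before the pages are returned with FreePages().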
std::atomic<size_t> reserved_address_space_{0};
// Used to protect access to the allocated address space counter and
// allocation map. This is needed because Wasm memories can be freed on
// another thread by the ArrayBufferTracker.
base::Mutex mutex_;
size_t allocated_address_space_ = 0;
//////////////////////////////////////////////////////////////////////////////
// Protected by {mutex_}:
// Track Wasm memory allocation information. This is keyed by the start of the
// buffer, rather than by the start of the allocation.
std::unordered_map<const void*, AllocationData> allocations_;
// Maps each buffer to the isolates that share the backing store.
std::unordered_map<const void*, std::unordered_set<Isolate*>>
isolates_per_buffer_;
// Maps which isolates have had a grow interrupt handled on the buffer. This
// is maintained to ensure that the instances are updated with the right size
// on Grow.
std::unordered_map<const void*, std::unordered_set<Isolate*>>
isolates_updated_on_grow_;
// Maps backing stores (void*) to the new size (size_t) of the underlying
// memory. An entry in this map is made on a grow call to the corresponding
// backing store. On consecutive grow calls to the same backing store, the
// size entry is updated. The entry is made right after the mprotect call
// that changes the protections on the backing store, so the memory objects
// have not been updated yet. The entry is erased once all the memory objects
// and instances that share this backing store have their bounds updated.
std::unordered_map<void*, size_t> grow_update_map_;
// End of fields protected by {mutex_}.
//////////////////////////////////////////////////////////////////////////////
DISALLOW_COPY_AND_ASSIGN(WasmMemoryTracker);
};
// Attempts to allocate an array buffer with guard regions suitable for trap
// handling. If address space is not available, it will return a buffer with
// mini-guards that will require bounds checks.
V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewArrayBuffer(Isolate*,
size_t size);
// Attempts to allocate a SharedArrayBuffer with guard regions suitable for
// trap handling. If address space is not available, it will try to reserve
// up to the maximum for that memory. If all else fails, it will return a
// buffer with mini-guards of initial size.
V8_EXPORT_PRIVATE MaybeHandle<JSArrayBuffer> NewSharedArrayBuffer(
Isolate*, size_t initial_size, size_t max_size);
Handle<JSArrayBuffer> SetupArrayBuffer(
Isolate*, void* backing_store, size_t size, bool is_external,
SharedFlag shared = SharedFlag::kNotShared);
V8_EXPORT_PRIVATE void DetachMemoryBuffer(Isolate* isolate,
Handle<JSArrayBuffer> buffer,
bool free_memory);
} // namespace wasm
} // namespace internal
} // namespace v8
#endif // V8_WASM_WASM_MEMORY_H_
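For orientation, a minimal sketch of how these entry points compose (a
hypothetical caller, not part of this commit):

// Allocate two wasm pages, then detach the buffer and free its pages.
Handle<JSArrayBuffer> buffer;
if (wasm::NewArrayBuffer(isolate, 2 * wasm::kWasmPageSize).ToHandle(&buffer)) {
  // ... attach to a WasmMemoryObject, run wasm code, etc. ...
  constexpr bool free_memory = true;
  wasm::DetachMemoryBuffer(isolate, buffer, free_memory);
}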


@ -272,19 +272,21 @@ Handle<JSArray> GetCustomSections(Isolate* isolate,
// Make a copy of the payload data in the section.
size_t size = section.payload.length();
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
size, InitializedFlag::kUninitialized);
Handle<JSArrayBuffer> array_buffer;
if (!result.ToHandle(&array_buffer)) {
void* memory =
size == 0 ? nullptr : isolate->array_buffer_allocator()->Allocate(size);
if (size && !memory) {
thrower->RangeError("out of memory allocating custom section data");
return Handle<JSArray>();
}
memcpy(array_buffer->backing_store(),
wire_bytes.begin() + section.payload.offset(),
Handle<JSArrayBuffer> buffer =
isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
constexpr bool is_external = false;
JSArrayBuffer::Setup(buffer, isolate, is_external, memory, size);
memcpy(memory, wire_bytes.begin() + section.payload.offset(),
section.payload.length());
matching_sections.push_back(array_buffer);
matching_sections.push_back(buffer);
}
int num_custom_sections = static_cast<int>(matching_sections.size());


@ -25,6 +25,7 @@
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-limits.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-text.h"
@ -1232,17 +1233,66 @@ void WasmIndirectFunctionTable::Resize(Isolate* isolate,
}
namespace {
bool AdjustBufferPermissions(Isolate* isolate, Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
if (new_size > old_buffer->allocation_length()) return false;
void* old_mem_start = old_buffer->backing_store();
size_t old_size = old_buffer->byte_length();
if (old_size != new_size) {
DCHECK_NOT_NULL(old_mem_start);
DCHECK_GE(new_size, old_size);
// If adjusting permissions fails, propagate error back to return
// failure to grow.
if (!i::SetPermissions(GetPlatformPageAllocator(), old_mem_start, new_size,
PageAllocator::kReadWrite)) {
return false;
}
reinterpret_cast<v8::Isolate*>(isolate)
->AdjustAmountOfExternalAllocatedMemory(new_size - old_size);
}
return true;
}
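// Note: in-place growth relies on the original reservation covering the
// maximum size (see TryAllocateBackingStore in wasm-memory.cc), so enlarging
// the buffer is only a permission change on already-reserved pages, not a
// new mapping.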
MaybeHandle<JSArrayBuffer> MemoryGrowBuffer(Isolate* isolate,
Handle<JSArrayBuffer> old_buffer,
size_t new_size) {
CHECK_EQ(0, new_size % wasm::kWasmPageSize);
// Reusing the backing store from externalized buffers causes problems with
// Blink's array buffers. The connection between the two is lost, which can
// lead to Blink not knowing about the other reference to the buffer and
// freeing it too early.
if (old_buffer->is_external() || new_size > old_buffer->allocation_length()) {
// We couldn't reuse the old backing store, so create a new one and copy the
// old contents in.
Handle<JSArrayBuffer> new_buffer;
if (!wasm::NewArrayBuffer(isolate, new_size).ToHandle(&new_buffer)) {
return {};
}
void* old_mem_start = old_buffer->backing_store();
size_t old_size = old_buffer->byte_length();
if (old_size == 0) return new_buffer;
memcpy(new_buffer->backing_store(), old_mem_start, old_size);
DCHECK(old_buffer.is_null() || !old_buffer->is_shared());
constexpr bool free_memory = true;
i::wasm::DetachMemoryBuffer(isolate, old_buffer, free_memory);
return new_buffer;
} else {
if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) return {};
// NOTE: We must allocate a new array buffer here because the spec
// assumes that ArrayBuffers do not change size.
void* backing_store = old_buffer->backing_store();
bool is_external = old_buffer->is_external();
// Disconnect buffer early so GC won't free it.
i::wasm::DetachMemoryBuffer(isolate, old_buffer, false);
Handle<JSArrayBuffer> new_buffer =
wasm::SetupArrayBuffer(isolate, backing_store, new_size, is_external);
return new_buffer;
}
}
// May GC, because SetSpecializationMemInfoFrom may GC
void SetInstanceMemory(Handle<WasmInstanceObject> instance,
Handle<JSArrayBuffer> buffer) {
bool is_wasm_module = instance->module()->origin == wasm::kWasmOrigin;
bool use_trap_handler =
instance->module_object().native_module()->use_trap_handler();
// Wasm modules compiled to use the trap handler don't have bounds checks,
// so they must have a memory that has guard regions.
CHECK_IMPLIES(is_wasm_module && use_trap_handler,
buffer->GetBackingStore()->has_guard_regions());
instance->SetRawMemory(reinterpret_cast<byte*>(buffer->backing_store()),
buffer->byte_length());
#if DEBUG
@ -1260,6 +1310,7 @@ void SetInstanceMemory(Handle<WasmInstanceObject> instance,
}
#endif
}
} // namespace
Handle<WasmMemoryObject> WasmMemoryObject::New(
@ -1267,47 +1318,44 @@ Handle<WasmMemoryObject> WasmMemoryObject::New(
uint32_t maximum) {
Handle<JSArrayBuffer> buffer;
if (!maybe_buffer.ToHandle(&buffer)) {
// If no buffer was provided, create a zero-length one.
auto clamped_maximum =
std::min(static_cast<size_t>(maximum), wasm::kV8MaxWasmMemoryPages);
auto backing_store = BackingStore::AllocateWasmMemory(
isolate, 0, clamped_maximum, SharedFlag::kNotShared);
buffer = isolate->factory()->NewJSArrayBuffer();
buffer->Attach(std::move(backing_store));
// If no buffer was provided, create a 0-length one.
buffer = wasm::SetupArrayBuffer(isolate, nullptr, 0, false);
}
// TODO(kschimpf): Do we need to add an argument that defines the
// style of memory the user prefers (with/without trap handling), so
// that the memory will match the style of the compiled wasm module?
// See issue v8:7143
Handle<JSFunction> memory_ctor(
isolate->native_context()->wasm_memory_constructor(), isolate);
auto memory_object = Handle<WasmMemoryObject>::cast(
auto memory_obj = Handle<WasmMemoryObject>::cast(
isolate->factory()->NewJSObject(memory_ctor, AllocationType::kOld));
memory_object->set_array_buffer(*buffer);
memory_object->set_maximum_pages(maximum);
memory_obj->set_array_buffer(*buffer);
memory_obj->set_maximum_pages(maximum);
if (buffer->is_shared()) {
auto backing_store = buffer->GetBackingStore();
backing_store->AttachSharedWasmMemoryObject(isolate, memory_object);
}
return memory_object;
return memory_obj;
}
MaybeHandle<WasmMemoryObject> WasmMemoryObject::New(Isolate* isolate,
uint32_t initial,
uint32_t maximum,
SharedFlag shared) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate, initial, maximum, shared);
if (!backing_store) return {};
Handle<JSArrayBuffer> buffer =
(shared == SharedFlag::kShared)
? isolate->factory()->NewJSSharedArrayBuffer()
: isolate->factory()->NewJSArrayBuffer();
buffer->Attach(std::move(backing_store));
bool is_shared_memory) {
Handle<JSArrayBuffer> buffer;
size_t size = static_cast<size_t>(i::wasm::kWasmPageSize) *
static_cast<size_t>(initial);
if (is_shared_memory) {
size_t max_size = static_cast<size_t>(i::wasm::kWasmPageSize) *
static_cast<size_t>(maximum);
if (!i::wasm::NewSharedArrayBuffer(isolate, size, max_size)
.ToHandle(&buffer)) {
return {};
}
} else {
if (!i::wasm::NewArrayBuffer(isolate, size).ToHandle(&buffer)) {
return {};
}
}
return New(isolate, buffer, maximum);
}
@ -1351,11 +1399,11 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
uint32_t pages) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.wasm"), "GrowMemory");
Handle<JSArrayBuffer> old_buffer(memory_object->array_buffer(), isolate);
// Any buffer used as an asmjs memory cannot be detached, and
// therefore this memory cannot be grown.
if (old_buffer->is_asmjs_memory()) return -1;
if (old_buffer->is_shared() && !FLAG_wasm_grow_shared_memory) return -1;
auto* memory_tracker = isolate->wasm_engine()->memory_tracker();
if (!memory_tracker->IsWasmMemoryGrowable(old_buffer)) return -1;
// Checks for maximum memory size.
// Check for the maximum memory size, and compute the new size.
uint32_t maximum_pages = wasm::max_mem_pages();
if (memory_object->has_maximum_pages()) {
maximum_pages = std::min(
@ -1370,49 +1418,47 @@ int32_t WasmMemoryObject::Grow(Isolate* isolate,
(pages > wasm::max_mem_pages() - old_pages)) { // exceeds limit
return -1;
}
// Compute new size.
size_t new_size =
static_cast<size_t>(old_pages + pages) * wasm::kWasmPageSize;
std::shared_ptr<BackingStore> backing_store = old_buffer->GetBackingStore();
if (!backing_store) return -1;
// Try to handle shared memory first.
// Memory is grown, but the memory objects and instances are not yet updated.
// Handle this in the interrupt handler so that all the isolates that share
// this buffer are updated safely.
Handle<JSArrayBuffer> new_buffer;
if (old_buffer->is_shared()) {
if (FLAG_wasm_grow_shared_memory) {
// Shared memories can only be grown in place; no copying.
if (backing_store->GrowWasmMemoryInPlace(isolate, new_size)) {
BackingStore::BroadcastSharedWasmMemoryGrow(isolate, backing_store,
new_size);
// Broadcasting the update should update this memory object too.
DCHECK_NE(*old_buffer, memory_object->array_buffer());
DCHECK_EQ(new_size, memory_object->array_buffer().byte_length());
return static_cast<int32_t>(old_pages); // success
}
// Adjust protections for the buffer.
if (!AdjustBufferPermissions(isolate, old_buffer, new_size)) {
return -1;
}
return -1;
}
// Try to grow non-shared memory in-place.
if (backing_store->GrowWasmMemoryInPlace(isolate, new_size)) {
// Detach old and create a new one with the grown backing store.
old_buffer->Detach(true);
Handle<JSArrayBuffer> new_buffer = isolate->factory()->NewJSArrayBuffer();
new_buffer->Attach(backing_store);
void* backing_store = old_buffer->backing_store();
if (memory_tracker->IsWasmSharedMemory(backing_store)) {
// This memory is shared between different isolates.
DCHECK(old_buffer->is_shared());
// Update pending grow state, and trigger a grow interrupt on all the
// isolates that share this buffer.
memory_tracker->SetPendingUpdateOnGrow(old_buffer, new_size);
// Handle interrupts for this isolate so that the instances in this
// isolate are updated.
isolate->stack_guard()->HandleInterrupts();
// Failure to allocate or to adjust permissions was already handled above,
// and updates to the instances are handled in the interrupt handler, so it
// is safe to return.
return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
}
// SharedArrayBuffer, but not shared across isolates. Set up a new buffer
// with updated permissions and update the instances.
new_buffer =
wasm::SetupArrayBuffer(isolate, backing_store, new_size,
old_buffer->is_external(), SharedFlag::kShared);
memory_object->update_instances(isolate, new_buffer);
return static_cast<int32_t>(old_pages); // success
} else {
if (!MemoryGrowBuffer(isolate, old_buffer, new_size)
.ToHandle(&new_buffer)) {
return -1;
}
}
// Try allocating a new backing store and copying.
std::unique_ptr<BackingStore> new_backing_store =
BackingStore::CopyWasmMemory(isolate, backing_store, new_size);
if (!new_backing_store) return -1;
// Detach old and create a new one with the new backing store.
old_buffer->Detach(true);
Handle<JSArrayBuffer> new_buffer = isolate->factory()->NewJSArrayBuffer();
new_buffer->Attach(std::move(new_backing_store));
// Update instances if any.
memory_object->update_instances(isolate, new_buffer);
return static_cast<int32_t>(old_pages); // success
return static_cast<uint32_t>(old_size / wasm::kWasmPageSize);
}
// static
@ -1446,15 +1492,18 @@ MaybeHandle<WasmGlobalObject> WasmGlobalObject::New(
global_obj->set_tagged_buffer(*tagged_buffer);
} else {
DCHECK(maybe_tagged_buffer.is_null());
uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
Handle<JSArrayBuffer> untagged_buffer;
uint32_t type_size = wasm::ValueTypes::ElementSizeInBytes(type);
if (!maybe_untagged_buffer.ToHandle(&untagged_buffer)) {
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
offset + type_size, InitializedFlag::kZeroInitialized);
// If no buffer was provided, create one long enough for the given type.
untagged_buffer = isolate->factory()->NewJSArrayBuffer(
SharedFlag::kNotShared, AllocationType::kOld);
if (!result.ToHandle(&untagged_buffer)) return {};
const bool initialize = true;
if (!JSArrayBuffer::SetupAllocatingData(untagged_buffer, isolate,
type_size, initialize)) {
return {};
}
}
// Check that the offset is in bounds.


@ -46,8 +46,6 @@ class WasmJSFunction;
class WasmModuleObject;
class WasmIndirectFunctionTable;
enum class SharedFlag : uint8_t;
template <class CppType>
class Managed;
@ -361,10 +359,9 @@ class WasmMemoryObject : public JSObject {
V8_EXPORT_PRIVATE static Handle<WasmMemoryObject> New(
Isolate* isolate, MaybeHandle<JSArrayBuffer> buffer, uint32_t maximum);
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(Isolate* isolate,
uint32_t initial,
uint32_t maximum,
SharedFlag shared);
V8_EXPORT_PRIVATE static MaybeHandle<WasmMemoryObject> New(
Isolate* isolate, uint32_t initial, uint32_t maximum,
bool is_shared_memory);
void update_instances(Isolate* isolate, Handle<JSArrayBuffer> buffer);


@ -153,7 +153,6 @@ v8_source_set("cctest_sources") {
"interpreter/test-source-positions.cc",
"libplatform/test-tracing.cc",
"libsampler/test-sampler.cc",
"manually-externalized-buffer.h",
"parsing/test-parse-decision.cc",
"parsing/test-preparser.cc",
"parsing/test-scanner-streams.cc",
@ -178,7 +177,6 @@ v8_source_set("cctest_sources") {
"test-api.h",
"test-array-list.cc",
"test-atomicops.cc",
"test-backing-store.cc",
"test-bignum-dtoa.cc",
"test-bignum.cc",
"test-bit-vector.cc",
@ -260,7 +258,6 @@ v8_source_set("cctest_sources") {
"unicode-helpers.cc",
"unicode-helpers.h",
"wasm/test-c-wasm-entry.cc",
"wasm/test-grow-memory.cc",
"wasm/test-jump-table-assembler.cc",
"wasm/test-run-wasm-64.cc",
"wasm/test-run-wasm-asmjs.cc",


@ -474,10 +474,8 @@
'test-api/WasmI32AtomicWaitCallback': [SKIP],
'test-api/WasmI64AtomicWaitCallback': [SKIP],
'test-api/WasmStreaming*': [SKIP],
'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP],
'test-c-wasm-entry/*': [SKIP],
'test-jump-table-assembler/*': [SKIP],
'test-grow-memory/*': [SKIP],
'test-run-wasm-64/*': [SKIP],
'test-run-wasm-asmjs/*': [SKIP],
'test-run-wasm-atomics64/*': [SKIP],


@ -193,8 +193,8 @@ TEST(ArrayBuffer_UnregisterDuringSweep) {
// barriers and proper synchronization this will trigger a data race on
// TSAN.
v8::ArrayBuffer::Contents contents = ab->Externalize();
contents.Deleter()(contents.Data(), contents.ByteLength(),
contents.DeleterData());
heap->isolate()->array_buffer_allocator()->Free(contents.Data(),
contents.ByteLength());
}
}


@ -146,10 +146,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToNewJSArrayBuffer) {
heap::FillCurrentPage(heap->new_space());
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
i_isolate->factory()
->NewJSArrayBufferAndBackingStore(100,
InitializedFlag::kZeroInitialized)
.ToHandleChecked();
i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100));
std::vector<Handle<FixedArray>> handles;
// Simulate a full space, filling the interesting page with live objects.
heap::SimulateFullSpace(heap->new_space(), &handles);
@ -190,10 +188,8 @@ UNINITIALIZED_TEST(PagePromotion_NewToOldJSArrayBuffer) {
heap::FillCurrentPage(heap->new_space());
// Allocate a buffer we would like to check against.
Handle<JSArrayBuffer> buffer =
i_isolate->factory()
->NewJSArrayBufferAndBackingStore(100,
InitializedFlag::kZeroInitialized)
.ToHandleChecked();
i_isolate->factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
CHECK(JSArrayBuffer::SetupAllocatingData(buffer, i_isolate, 100));
std::vector<Handle<FixedArray>> handles;
// Simulate a full space, filling the interesting page with live objects.
heap::SimulateFullSpace(heap->new_space(), &handles);


@ -1,34 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_
#define V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_
#include "src/api/api-inl.h"
namespace v8 {
namespace internal {
namespace testing {
// Utility to free the allocated memory for a buffer that is manually
// externalized in a test.
struct ManuallyExternalizedBuffer {
Handle<JSArrayBuffer> buffer_;
v8::ArrayBuffer::Contents contents_;
explicit ManuallyExternalizedBuffer(Handle<JSArrayBuffer> buffer)
: buffer_(buffer),
contents_(v8::Utils::ToLocal(buffer_)->Externalize()) {}
~ManuallyExternalizedBuffer() {
contents_.Deleter()(contents_.Data(), contents_.ByteLength(),
contents_.DeleterData());
}
void* backing_store() { return contents_.Data(); }
};
} // namespace testing
} // namespace internal
} // namespace v8
#endif // V8_CCTEST_MANUALLY_EXTERNALIZED_BUFFER_H_


@ -17,10 +17,7 @@ class ScopedArrayBufferContents {
public:
explicit ScopedArrayBufferContents(const v8::ArrayBuffer::Contents& contents)
: contents_(contents) {}
~ScopedArrayBufferContents() {
contents_.Deleter()(contents_.Data(), contents_.ByteLength(),
contents_.DeleterData());
}
~ScopedArrayBufferContents() { free(contents_.AllocationBase()); }
void* Data() const { return contents_.Data(); }
size_t ByteLength() const { return contents_.ByteLength(); }
@ -39,10 +36,7 @@ class ScopedSharedArrayBufferContents {
explicit ScopedSharedArrayBufferContents(
const v8::SharedArrayBuffer::Contents& contents)
: contents_(contents) {}
~ScopedSharedArrayBufferContents() {
contents_.Deleter()(contents_.Data(), contents_.ByteLength(),
contents_.DeleterData());
}
~ScopedSharedArrayBufferContents() { free(contents_.AllocationBase()); }
void* Data() const { return contents_.Data(); }
size_t ByteLength() const { return contents_.ByteLength(); }


@ -1,85 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/api/api-inl.h"
#include "src/objects/backing-store.h"
#include "src/wasm/wasm-objects.h"
#include "test/cctest/cctest.h"
#include "test/cctest/manually-externalized-buffer.h"
namespace v8 {
namespace internal {
using testing::ManuallyExternalizedBuffer;
TEST(Run_WasmModule_Buffer_Externalized_Detach) {
{
// Regression test for
// https://bugs.chromium.org/p/chromium/issues/detail?id=731046
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
MaybeHandle<JSArrayBuffer> result =
isolate->factory()->NewJSArrayBufferAndBackingStore(
wasm::kWasmPageSize, InitializedFlag::kZeroInitialized);
Handle<JSArrayBuffer> buffer = result.ToHandleChecked();
// Embedder requests contents.
ManuallyExternalizedBuffer external(buffer);
buffer->Detach();
CHECK(buffer->was_detached());
// Make sure we can write to the buffer without crashing
uint32_t* int_buffer =
reinterpret_cast<uint32_t*>(external.backing_store());
int_buffer[0] = 0;
// Embedder frees contents.
}
CcTest::CollectAllAvailableGarbage();
}
TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
{
// Regression test for https://crbug.com/813876
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
MaybeHandle<WasmMemoryObject> result =
WasmMemoryObject::New(isolate, 1, 1, SharedFlag::kNotShared);
Handle<WasmMemoryObject> memory_object = result.ToHandleChecked();
Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate);
{
// Embedder requests contents.
ManuallyExternalizedBuffer external(buffer);
// Growing (even by 0) detaches the old buffer.
WasmMemoryObject::Grow(isolate, memory_object, 0);
CHECK(buffer->was_detached());
// Embedder frees contents.
}
// Make sure the memory object has a new buffer that can be written to.
uint32_t* int_buffer = reinterpret_cast<uint32_t*>(
memory_object->array_buffer().backing_store());
int_buffer[0] = 0;
}
CcTest::CollectAllAvailableGarbage();
}
#if V8_TARGET_ARCH_64_BIT
TEST(BackingStore_Reclaim) {
// Make sure we can allocate memories without running out of address space.
Isolate* isolate = CcTest::InitIsolateOnce();
for (int i = 0; i < 256; ++i) {
auto backing_store =
BackingStore::AllocateWasmMemory(isolate, 1, 1, SharedFlag::kNotShared);
CHECK(backing_store);
}
}
#endif
} // namespace internal
} // namespace v8


@ -1559,8 +1559,8 @@ TEST(TryLookupElement) {
v8::ArrayBuffer::Contents contents = buffer->Externalize();
buffer->Detach();
contents.Deleter()(contents.Data(), contents.ByteLength(),
contents.DeleterData());
isolate->array_buffer_allocator()->Free(contents.Data(),
contents.ByteLength());
CHECK_ABSENT(object, 0);
CHECK_ABSENT(object, 1);


@ -2913,8 +2913,7 @@ TEST(ArrayBufferSharedBackingStore) {
CHECK(ab2_data);
CHECK_EQ(ab1_data, ab2_data);
CHECK_EQ(2, GetRetainersCount(snapshot, ab1_data));
ab_contents.Deleter()(ab_contents.Data(), ab_contents.ByteLength(),
ab_contents.DeleterData());
free(data);
}


@ -1,131 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-module-builder.h"
#include "test/cctest/cctest.h"
#include "test/cctest/manually-externalized-buffer.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
#include "test/common/wasm/wasm-module-runner.h"
namespace v8 {
namespace internal {
namespace wasm {
namespace test_grow_memory {
using testing::CompileAndInstantiateForTesting;
using v8::internal::testing::ManuallyExternalizedBuffer;
namespace {
void ExportAsMain(WasmFunctionBuilder* f) {
f->builder()->AddExport(CStrVector("main"), f);
}
#define EMIT_CODE_WITH_END(f, code) \
do { \
f->EmitCode(code, sizeof(code)); \
f->Emit(kExprEnd); \
} while (false)
void Cleanup(Isolate* isolate = CcTest::InitIsolateOnce()) {
// By sending a low memory notification, we will try hard to collect all
// garbage and will therefore also invoke all weak callbacks of actually
// unreachable persistent handles.
reinterpret_cast<v8::Isolate*>(isolate)->LowMemoryNotification();
}
} // namespace
TEST(GrowMemDetaches) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
Handle<WasmMemoryObject> memory_object =
WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared)
.ToHandleChecked();
Handle<JSArrayBuffer> buffer(memory_object->array_buffer(), isolate);
int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0);
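// Grow returns the previous size in pages and installs a fresh buffer,
// even when the requested delta is zero.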
CHECK_EQ(16, result);
CHECK_NE(*buffer, memory_object->array_buffer());
CHECK(buffer->was_detached());
}
Cleanup();
}
TEST(Externalized_GrowMemMemSize) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
Handle<WasmMemoryObject> memory_object =
WasmMemoryObject::New(isolate, 16, 100, SharedFlag::kNotShared)
.ToHandleChecked();
ManuallyExternalizedBuffer external(
handle(memory_object->array_buffer(), isolate));
int32_t result = WasmMemoryObject::Grow(isolate, memory_object, 0);
CHECK_EQ(16, result);
CHECK_NE(*external.buffer_, memory_object->array_buffer());
CHECK(external.buffer_->was_detached());
}
Cleanup();
}
TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
ZoneBuffer buffer(&zone);
builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
const Handle<WasmInstanceObject> instance =
CompileAndInstantiateForTesting(
isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
// Fake the Embedder flow by externalizing the array buffer.
ManuallyExternalizedBuffer external1(
handle(memory_object->array_buffer(), isolate));
// Grow using the API.
uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
CHECK_EQ(16, result);
CHECK(external1.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, external1.buffer_->byte_length());
CHECK_NE(*external1.buffer_, memory_object->array_buffer());
// Fake the Embedder flow by externalizing the array buffer.
ManuallyExternalizedBuffer external2(
handle(memory_object->array_buffer(), isolate));
// Grow using an internal WASM bytecode.
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
CHECK_EQ(26, result);
CHECK(external2.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, external2.buffer_->byte_length());
CHECK_NE(*external2.buffer_, memory_object->array_buffer());
}
Cleanup();
}
} // namespace test_grow_memory
} // namespace wasm
} // namespace internal
} // namespace v8
#undef EMIT_CODE_WITH_END


@@ -11,6 +11,7 @@
#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"
@@ -941,6 +942,154 @@ TEST(MemoryWithOOBEmptyDataSegment) {
Cleanup();
}
// Utility to free the allocated memory for a buffer that is manually
// externalized in a test.
struct ManuallyExternalizedBuffer {
Isolate* isolate_;
Handle<JSArrayBuffer> buffer_;
void* allocation_base_;
size_t allocation_length_;
bool const should_free_;
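// Wasm memories stay registered with the engine's memory tracker, which
// owns and frees their pages; only embedder-allocated (non-wasm) backing
// stores are externalized here and freed in the destructor.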
ManuallyExternalizedBuffer(JSArrayBuffer buffer, Isolate* isolate)
: isolate_(isolate),
buffer_(buffer, isolate),
allocation_base_(buffer.allocation_base()),
allocation_length_(buffer.allocation_length()),
should_free_(!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
buffer.backing_store())) {
if (!isolate_->wasm_engine()->memory_tracker()->IsWasmMemory(
buffer.backing_store())) {
v8::Utils::ToLocal(buffer_)->Externalize();
}
}
~ManuallyExternalizedBuffer() {
if (should_free_) {
buffer_->FreeBackingStoreFromMainThread();
}
}
};
TEST(Run_WasmModule_Buffer_Externalized_GrowMem) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
TestSignatures sigs;
v8::internal::AccountingAllocator allocator;
Zone zone(&allocator, ZONE_NAME);
WasmModuleBuilder* builder = new (&zone) WasmModuleBuilder(&zone);
WasmFunctionBuilder* f = builder->AddFunction(sigs.i_v());
ExportAsMain(f);
byte code[] = {WASM_GROW_MEMORY(WASM_I32V_1(6)), WASM_DROP,
WASM_MEMORY_SIZE};
EMIT_CODE_WITH_END(f, code);
ZoneBuffer buffer(&zone);
builder->WriteTo(&buffer);
testing::SetupIsolateForWasmModule(isolate);
ErrorThrower thrower(isolate, "Test");
const Handle<WasmInstanceObject> instance =
CompileAndInstantiateForTesting(
isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()))
.ToHandleChecked();
Handle<WasmMemoryObject> memory_object(instance->memory_object(), isolate);
// Fake the Embedder flow by externalizing the array buffer.
ManuallyExternalizedBuffer buffer1(memory_object->array_buffer(), isolate);
// Grow using the API.
uint32_t result = WasmMemoryObject::Grow(isolate, memory_object, 4);
CHECK_EQ(16, result);
CHECK(buffer1.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, buffer1.buffer_->byte_length());
CHECK_NE(*buffer1.buffer_, memory_object->array_buffer());
// Fake the Embedder flow by externalizing the array buffer.
ManuallyExternalizedBuffer buffer2(memory_object->array_buffer(), isolate);
// Grow using an internal WASM bytecode.
result = testing::RunWasmModuleForTesting(isolate, instance, 0, nullptr);
CHECK_EQ(26, result);
CHECK(buffer2.buffer_->was_detached()); // growing always detaches
CHECK_EQ(0, buffer2.buffer_->byte_length());
CHECK_NE(*buffer2.buffer_, memory_object->array_buffer());
}
Cleanup();
}
TEST(Run_WasmModule_Buffer_Externalized_GrowMemMemSize) {
{
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
Handle<JSArrayBuffer> buffer;
CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
Handle<WasmMemoryObject> mem_obj =
WasmMemoryObject::New(isolate, buffer, 100);
auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
int32_t result = WasmMemoryObject::Grow(isolate, mem_obj, 0);
CHECK_EQ(16, result);
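// The contents were externalized above, so the wasm pages must be freed
// manually; FreeBackingStore needs the original allocation base and length.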
constexpr bool is_wasm_memory = true;
const JSArrayBuffer::Allocation allocation{contents.AllocationBase(),
contents.AllocationLength(),
contents.Data(), is_wasm_memory};
JSArrayBuffer::FreeBackingStore(isolate, allocation);
}
Cleanup();
}
TEST(Run_WasmModule_Buffer_Externalized_Detach) {
{
// Regression test for
// https://bugs.chromium.org/p/chromium/issues/detail?id=731046
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
Handle<JSArrayBuffer> buffer;
CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
auto const contents = v8::Utils::ToLocal(buffer)->Externalize();
wasm::DetachMemoryBuffer(isolate, buffer, true);
constexpr bool is_wasm_memory = true;
const JSArrayBuffer::Allocation allocation{contents.AllocationBase(),
contents.AllocationLength(),
contents.Data(), is_wasm_memory};
JSArrayBuffer::FreeBackingStore(isolate, allocation);
}
Cleanup();
}
TEST(Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree) {
// Regression test for https://crbug.com/813876
Isolate* isolate = CcTest::InitIsolateOnce();
HandleScope scope(isolate);
Handle<JSArrayBuffer> buffer;
CHECK(wasm::NewArrayBuffer(isolate, 16 * kWasmPageSize).ToHandle(&buffer));
Handle<WasmMemoryObject> mem = WasmMemoryObject::New(isolate, buffer, 128);
auto contents = v8::Utils::ToLocal(buffer)->Externalize();
WasmMemoryObject::Grow(isolate, mem, 0);
constexpr bool is_wasm_memory = true;
JSArrayBuffer::FreeBackingStore(
isolate, JSArrayBuffer::Allocation(contents.AllocationBase(),
contents.AllocationLength(),
contents.Data(), is_wasm_memory));
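// The old backing store is gone; Grow must have attached a fresh buffer to
// the memory object, otherwise the write below would be a use-after-free.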
// Make sure we can write to the buffer without crashing
uint32_t* int_buffer =
reinterpret_cast<uint32_t*>(mem->array_buffer().backing_store());
int_buffer[0] = 0;
}
#if V8_TARGET_ARCH_64_BIT
TEST(Run_WasmModule_Reclaim_Memory) {
// Make sure we can allocate memories without running out of address space.
Isolate* isolate = CcTest::InitIsolateOnce();
Handle<JSArrayBuffer> buffer;
for (int i = 0; i < 256; ++i) {
HandleScope scope(isolate);
CHECK(NewArrayBuffer(isolate, kWasmPageSize).ToHandle(&buffer));
}
}
#endif
TEST(AtomicOpDisassembly) {
{
EXPERIMENTAL_FLAG_SCOPE(threads);


@@ -11,6 +11,7 @@
#include "src/utils/version.h"
#include "src/wasm/module-decoder.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-module-builder.h"
#include "src/wasm/wasm-module.h"
#include "src/wasm/wasm-objects-inl.h"


@@ -10,6 +10,7 @@
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/wasm-import-wrapper-cache.h"
#include "src/wasm/wasm-memory.h"
#include "src/wasm/wasm-objects-inl.h"
namespace v8 {
@@ -72,23 +73,29 @@ byte* TestingModuleBuilder::AddMemory(uint32_t size, SharedFlag shared) {
CHECK_NULL(mem_start_);
CHECK_EQ(0, mem_size_);
DCHECK(!instance_object_->has_memory_object());
uint32_t initial_pages = RoundUp(size, kWasmPageSize) / kWasmPageSize;
uint32_t maximum_pages = (test_module_->maximum_pages != 0)
? test_module_->maximum_pages
: initial_pages;
DCHECK_IMPLIES(test_module_->origin == kWasmOrigin,
size % kWasmPageSize == 0);
test_module_->has_memory = true;
uint32_t max_size =
(test_module_->maximum_pages != 0) ? test_module_->maximum_pages : size;
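// Wasm memory is allocated in whole 64 KiB pages; round the requested
// byte size up to a page multiple.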
uint32_t alloc_size = RoundUp(size, kWasmPageSize);
Handle<JSArrayBuffer> new_buffer;
if (shared == SharedFlag::kShared) {
CHECK(NewSharedArrayBuffer(isolate_, alloc_size, max_size)
.ToHandle(&new_buffer));
} else {
CHECK(NewArrayBuffer(isolate_, alloc_size).ToHandle(&new_buffer));
}
CHECK(!new_buffer.is_null());
mem_start_ = reinterpret_cast<byte*>(new_buffer->backing_store());
mem_size_ = size;
CHECK(size == 0 || mem_start_);
memset(mem_start_, 0, size);
// Create the WasmMemoryObject.
Handle<WasmMemoryObject> memory_object =
WasmMemoryObject::New(isolate_, initial_pages, maximum_pages, shared)
.ToHandleChecked();
WasmMemoryObject::New(isolate_, new_buffer, max_size);
instance_object_->set_memory_object(*memory_object);
mem_start_ =
reinterpret_cast<byte*>(memory_object->array_buffer().backing_store());
mem_size_ = size;
CHECK(size == 0 || mem_start_);
WasmMemoryObject::AddInstance(isolate_, memory_object, instance_object_);
// TODO(wasm): Delete the following two lines when test-run-wasm uses a
// multiple of kPageSize as memory size. At the moment, the effect of these


@@ -1,41 +0,0 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
let kPageSize = 65536;
function allocMems(count, initial, maximum) {
print(`alloc ${count}`);
let result = [];
for (let i = 0; i < count; i++) {
print(` memory #${i} (initial=${initial}, maximum=${maximum})...`);
result.push(new WebAssembly.Memory({initial: initial, maximum: maximum}));
}
return result;
}
function check(mems, initial) {
for (let m of mems) {
assertEquals(initial * kPageSize, m.buffer.byteLength);
}
}
function test(count, initial, maximum) {
let mems = allocMems(count, initial, maximum);
check(mems, initial);
}
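// Exercise various (count, initial, maximum) combinations, including
// memories that declare no maximum.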
test(1, 1, 1);
test(1, 1, 2);
test(1, 1, 3);
test(1, 1, 4);
test(2, 1, 1);
test(2, 1, 2);
test(2, 1, 3);
test(2, 1, 4);
test(1, 1, undefined);
test(2, 1, undefined);
test(3, 1, undefined);
test(4, 1, undefined);


@@ -52,6 +52,13 @@ class JSTypedLoweringTest : public TypedGraphTest {
return reducer.Reduce(node);
}
Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
Handle<JSArrayBuffer> buffer =
factory()->NewJSArrayBuffer(SharedFlag::kNotShared);
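// Setup attaches the caller-owned bytes; the boolean argument marks the
// buffer external so the GC does not attempt to free them.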
JSArrayBuffer::Setup(buffer, isolate(), true, bytes, byte_length);
return buffer;
}
JSOperatorBuilder* javascript() { return &javascript_; }
private:


@@ -10,7 +10,6 @@
#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
#include "src/objects/objects-inl.h"
#include "src/wasm/wasm-objects.h"
#include "test/unittests/test-utils.h"
@@ -1988,44 +1987,23 @@ class ValueSerializerTestWithSharedArrayBufferClone
ValueSerializerTestWithSharedArrayBufferClone()
: serializer_delegate_(this), deserializer_delegate_(this) {}
void InitializeData(const std::vector<uint8_t>& data, bool is_wasm_memory) {
void InitializeData(const std::vector<uint8_t>& data) {
data_ = data;
{
Context::Scope scope(serialization_context());
input_buffer_ =
NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory);
SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
}
{
Context::Scope scope(deserialization_context());
output_buffer_ =
NewSharedArrayBuffer(data_.data(), data_.size(), is_wasm_memory);
SharedArrayBuffer::New(isolate(), data_.data(), data_.size());
}
}
const Local<SharedArrayBuffer>& input_buffer() { return input_buffer_; }
const Local<SharedArrayBuffer>& output_buffer() { return output_buffer_; }
Local<SharedArrayBuffer> NewSharedArrayBuffer(void* data, size_t byte_length,
bool is_wasm_memory) {
if (is_wasm_memory) {
// TODO(titzer): there is no way to create Wasm memory backing stores
// through the API, or to create a shared array buffer whose backing
// store is wasm memory, so use the internal API.
DCHECK_EQ(0, byte_length % i::wasm::kWasmPageSize);
auto pages = byte_length / i::wasm::kWasmPageSize;
auto i_isolate = reinterpret_cast<i::Isolate*>(isolate());
auto backing_store = i::BackingStore::AllocateWasmMemory(
i_isolate, pages, pages, i::SharedFlag::kShared);
memcpy(backing_store->buffer_start(), data, byte_length);
i::Handle<i::JSArrayBuffer> buffer =
i_isolate->factory()->NewJSSharedArrayBuffer();
buffer->Attach(std::move(backing_store));
return Utils::ToLocalShared(buffer);
} else {
return SharedArrayBuffer::New(isolate(), data, byte_length);
}
}
static void SetUpTestCase() {
flag_was_enabled_ = i::FLAG_harmony_sharedarraybuffer;
i::FLAG_harmony_sharedarraybuffer = true;
@@ -2097,7 +2075,7 @@ bool ValueSerializerTestWithSharedArrayBufferClone::flag_was_enabled_ = false;
TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
RoundTripSharedArrayBufferClone) {
InitializeData({0x00, 0x01, 0x80, 0xFF}, false);
InitializeData({0x00, 0x01, 0x80, 0xFF});
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))
@@ -2136,7 +2114,7 @@ TEST_F(ValueSerializerTestWithSharedArrayBufferClone,
std::vector<uint8_t> data = {0x00, 0x01, 0x80, 0xFF};
data.resize(65536);
InitializeData(data, true);
InitializeData(data);
EXPECT_CALL(serializer_delegate_,
GetSharedArrayBufferId(isolate(), input_buffer()))


@@ -25,11 +25,11 @@
#include "src/codegen/assembler-inl.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/objects/backing-store.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/allocation.h"
#include "src/utils/vector.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
#include "test/common/assembler-tester.h"
#include "test/unittests/test-utils.h"
@@ -80,13 +80,19 @@ class TrapHandlerTest : public TestWithIsolate,
public ::testing::WithParamInterface<TrapHandlerStyle> {
protected:
void SetUp() override {
backing_store_ = BackingStore::AllocateWasmMemory(i_isolate(), 1, 1,
SharedFlag::kNotShared);
CHECK(backing_store_);
CHECK(backing_store_->has_guard_regions());
// The allocated backing store ends with a guard page.
crash_address_ = reinterpret_cast<Address>(backing_store_->buffer_start()) +
backing_store_->byte_length() + 32;
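// The address lies 32 bytes past the committed pages, inside the guard
// region, so accessing it raises the fault the trap handler must handle.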
void* base = nullptr;
size_t length = 0;
accessible_memory_start_ =
i_isolate()
->wasm_engine()
->memory_tracker()
->TryAllocateBackingStoreForTesting(
i_isolate()->heap(), 1 * kWasmPageSize, &base, &length);
memory_buffer_ =
base::AddressRegion(reinterpret_cast<Address>(base), length);
// The allocated memory buffer ends with a guard page.
crash_address_ = memory_buffer_.end() - 32;
// Allocate a buffer for the generated code.
buffer_ = AllocateAssemblerBuffer(AssemblerBase::kMinimalBufferSize,
GetRandomMmapAddr());
@@ -116,7 +122,10 @@ class TrapHandlerTest : public TestWithIsolate,
CHECK(!GetThreadInWasmFlag());
buffer_.reset();
recovery_buffer_.reset();
backing_store_.reset();
// Free the allocated backing store.
i_isolate()->wasm_engine()->memory_tracker()->FreeBackingStoreForTesting(
memory_buffer_, accessible_memory_start_);
// Clean up the trap handler
trap_handler::RemoveTrapHandler();
@@ -243,12 +252,14 @@ class TrapHandlerTest : public TestWithIsolate,
bool test_handler_executed() { return g_test_handler_executed; }
// The backing store used for testing the trap handler.
std::unique_ptr<BackingStore> backing_store_;
// Allocated memory which corresponds to wasm memory with guard regions.
base::AddressRegion memory_buffer_;
// Address within the guard region of the wasm memory. Accessing this memory
// address causes a signal or exception.
Address crash_address_;
// The start of the accessible region in the allocated memory. This pointer is
// needed to unregister the memory from the wasm memory tracker.
void* accessible_memory_start_;
// Buffer for generated code.
std::unique_ptr<TestingAssemblerBuffer> buffer_;


@@ -9,6 +9,7 @@
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/wasm-code-manager.h"
#include "src/wasm/wasm-engine.h"
#include "src/wasm/wasm-memory.h"
namespace v8 {
namespace internal {