diff --git a/BUILD.bazel b/BUILD.bazel index 213eb48480..69062981a2 100644 --- a/BUILD.bazel +++ b/BUILD.bazel @@ -909,6 +909,7 @@ filegroup( "src/objects/heap-object.tq", "src/objects/js-array-buffer.tq", "src/objects/js-array.tq", + "src/objects/js-atomics-synchronization.tq", "src/objects/js-collection-iterator.tq", "src/objects/js-collection.tq", "src/objects/js-function.tq", @@ -1106,6 +1107,7 @@ filegroup( "src/builtins/builtins-array.cc", "src/builtins/builtins-arraybuffer.cc", "src/builtins/builtins-async-module.cc", + "src/builtins/builtins-atomics-synchronization.cc", "src/builtins/builtins-bigint.cc", "src/builtins/builtins-callsite.cc", "src/builtins/builtins-collections.cc", @@ -1714,6 +1716,9 @@ filegroup( "src/objects/js-array-buffer.h", "src/objects/js-array-inl.h", "src/objects/js-array.h", + "src/objects/js-atomics-synchronization-inl.h", + "src/objects/js-atomics-synchronization.h", + "src/objects/js-atomics-synchronization.cc", "src/objects/js-collection-inl.h", "src/objects/js-collection-iterator.h", "src/objects/js-collection-iterator-inl.h", diff --git a/BUILD.gn b/BUILD.gn index 3096c0bc90..adffeec107 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1809,6 +1809,7 @@ torque_files = [ "src/objects/heap-object.tq", "src/objects/js-array-buffer.tq", "src/objects/js-array.tq", + "src/objects/js-atomics-synchronization.tq", "src/objects/js-collection-iterator.tq", "src/objects/js-collection.tq", "src/objects/js-function.tq", @@ -3231,6 +3232,8 @@ v8_header_set("v8_internal_headers") { "src/objects/js-array-buffer.h", "src/objects/js-array-inl.h", "src/objects/js-array.h", + "src/objects/js-atomics-synchronization-inl.h", + "src/objects/js-atomics-synchronization.h", "src/objects/js-collection-inl.h", "src/objects/js-collection-iterator-inl.h", "src/objects/js-collection-iterator.h", @@ -4195,6 +4198,7 @@ v8_source_set("v8_base_without_compiler") { "src/builtins/builtins-array.cc", "src/builtins/builtins-arraybuffer.cc", 
"src/builtins/builtins-async-module.cc", + "src/builtins/builtins-atomics-synchronization.cc", "src/builtins/builtins-bigint.cc", "src/builtins/builtins-callsite.cc", "src/builtins/builtins-collections.cc", @@ -4430,6 +4434,7 @@ v8_source_set("v8_base_without_compiler") { "src/objects/field-type.cc", "src/objects/intl-objects.cc", "src/objects/js-array-buffer.cc", + "src/objects/js-atomics-synchronization.cc", "src/objects/js-break-iterator.cc", "src/objects/js-collator.cc", "src/objects/js-date-time-format.cc", diff --git a/include/v8-internal.h b/include/v8-internal.h index 7101ab9141..eaa58de505 100644 --- a/include/v8-internal.h +++ b/include/v8-internal.h @@ -307,14 +307,15 @@ constexpr uint64_t kExternalPointerTagShift = 48; // clang-format off enum ExternalPointerTag : uint64_t { kExternalPointerNullTag = MAKE_TAG(0b0000000000000000), - kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111110000000), - kExternalStringResourceTag = MAKE_TAG(0b1000000011111111), - kExternalStringResourceDataTag = MAKE_TAG(0b1000000101111111), - kForeignForeignAddressTag = MAKE_TAG(0b1000000110111111), - kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000000111011111), - kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000000111101111), - kCodeEntryPointTag = MAKE_TAG(0b1000000111110111), - kExternalObjectValueTag = MAKE_TAG(0b1000000111111011), + kExternalPointerFreeEntryTag = MAKE_TAG(0b0111111100000000), + kWaiterQueueNodeTag = MAKE_TAG(0b1000000111111111), + kExternalStringResourceTag = MAKE_TAG(0b1000001011111111), + kExternalStringResourceDataTag = MAKE_TAG(0b1000001101111111), + kForeignForeignAddressTag = MAKE_TAG(0b1000001110111111), + kNativeContextMicrotaskQueueTag = MAKE_TAG(0b1000001111011111), + kEmbedderDataSlotPayloadTag = MAKE_TAG(0b1000001111101111), + kCodeEntryPointTag = MAKE_TAG(0b1000001111110111), + kExternalObjectValueTag = MAKE_TAG(0b1000001111111011), }; // clang-format on #undef MAKE_TAG diff --git a/src/builtins/base.tq b/src/builtins/base.tq index 
5053a17ec6..ed45541ee4 100644 --- a/src/builtins/base.tq +++ b/src/builtins/base.tq @@ -12,6 +12,7 @@ #include 'src/objects/call-site-info.h' #include 'src/objects/elements-kind.h' #include 'src/objects/free-space.h' +#include 'src/objects/js-atomics-synchronization.h' #include 'src/objects/js-function.h' #include 'src/objects/js-generator.h' #include 'src/objects/js-promise.h' diff --git a/src/builtins/builtins-atomics-synchronization.cc b/src/builtins/builtins-atomics-synchronization.cc new file mode 100644 index 0000000000..0db6dcbd8d --- /dev/null +++ b/src/builtins/builtins-atomics-synchronization.cc @@ -0,0 +1,92 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/builtins/builtins-utils-inl.h" +#include "src/objects/js-atomics-synchronization-inl.h" + +namespace v8 { +namespace internal { + +BUILTIN(AtomicsMutexConstructor) { + DCHECK(FLAG_harmony_struct); + HandleScope scope(isolate); + return *JSAtomicsMutex::Create(isolate); +} + +BUILTIN(AtomicsMutexLock) { + DCHECK(FLAG_harmony_struct); + constexpr char method_name[] = "Atomics.Mutex.lock"; + HandleScope scope(isolate); + + Handle js_mutex_obj = args.atOrUndefined(isolate, 1); + if (!js_mutex_obj->IsJSAtomicsMutex()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); + } + Handle js_mutex = Handle::cast(js_mutex_obj); + Handle run_under_lock = args.atOrUndefined(isolate, 2); + if (!run_under_lock->IsCallable()) { + THROW_NEW_ERROR_RETURN_FAILURE(isolate, + NewTypeError(MessageTemplate::kNotCallable)); + } + + // Like Atomics.wait, synchronous locking may block, and so is disallowed on + // the main thread. + // + // This is not a recursive lock, so also throw if recursively locking. 
+ if (!isolate->allow_atomics_wait() || js_mutex->IsCurrentThreadOwner()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kAtomicsMutexLockNotAllowed)); + } + + Handle result; + { + // TODO(syg): Make base::LockGuard work with Handles. + JSAtomicsMutex::Lock(isolate, js_mutex); + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, run_under_lock, + isolate->factory()->undefined_value(), 0, nullptr)); + js_mutex->Unlock(isolate); + } + + return *result; +} + +BUILTIN(AtomicsMutexTryLock) { + DCHECK(FLAG_harmony_struct); + constexpr char method_name[] = "Atomics.Mutex.tryLock"; + HandleScope scope(isolate); + + Handle js_mutex_obj = args.atOrUndefined(isolate, 1); + if (!js_mutex_obj->IsJSAtomicsMutex()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); + } + Handle js_mutex = Handle::cast(js_mutex_obj); + Handle run_under_lock = args.atOrUndefined(isolate, 2); + if (!run_under_lock->IsCallable()) { + THROW_NEW_ERROR_RETURN_FAILURE(isolate, + NewTypeError(MessageTemplate::kNotCallable)); + } + + if (js_mutex->TryLock()) { + Handle result; + ASSIGN_RETURN_FAILURE_ON_EXCEPTION( + isolate, result, + Execution::Call(isolate, run_under_lock, + isolate->factory()->undefined_value(), 0, nullptr)); + js_mutex->Unlock(isolate); + return ReadOnlyRoots(isolate).true_value(); + } + + return ReadOnlyRoots(isolate).false_value(); +} + +} // namespace internal +} // namespace v8 diff --git a/src/builtins/builtins-definitions.h b/src/builtins/builtins-definitions.h index dc9cbb093f..0e98586f7f 100644 --- a/src/builtins/builtins-definitions.h +++ b/src/builtins/builtins-definitions.h @@ -995,9 +995,12 @@ namespace internal { TFS(WeakCollectionDelete, kCollection, kKey) \ TFS(WeakCollectionSet, kCollection, kKey, kValue) \ \ - /* JS Structs */ \ + /* JS Structs and friends */ \ 
CPP(SharedStructTypeConstructor) \ CPP(SharedStructConstructor) \ + CPP(AtomicsMutexConstructor) \ + CPP(AtomicsMutexLock) \ + CPP(AtomicsMutexTryLock) \ \ /* AsyncGenerator */ \ \ diff --git a/src/common/message-template.h b/src/common/message-template.h index b9f4f0117c..af9d7bf2c5 100644 --- a/src/common/message-template.h +++ b/src/common/message-template.h @@ -41,6 +41,8 @@ namespace internal { T(AwaitNotInDebugEvaluate, \ "await can not be used when evaluating code " \ "while paused in the debugger") \ + T(AtomicsMutexLockNotAllowed, \ + "Atomics.Mutex.lock cannot be called in this context") \ T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \ T(BadRoundingType, "RoundingType is not fractionDigits") \ T(BadSortComparisonFunction, \ @@ -126,8 +128,6 @@ namespace internal { T(LocaleBadParameters, "Incorrect locale information provided") \ T(ListFormatBadParameters, "Incorrect ListFormat information provided") \ T(MapperFunctionNonCallable, "flatMap mapper function is not callable") \ - T(MethodCalledOnWrongObject, \ - "Method % called on a non-object or on a wrong type of object.") \ T(MethodInvokedOnWrongType, "Method invoked on an object that is not %.") \ T(NoAccess, "no access") \ T(NonCallableInInstanceOfCheck, \ diff --git a/src/compiler/types.cc b/src/compiler/types.cc index 8b1e480014..25b8facda6 100644 --- a/src/compiler/types.cc +++ b/src/compiler/types.cc @@ -264,6 +264,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_PROMISE_TYPE: case JS_SHADOW_REALM_TYPE: case JS_SHARED_STRUCT_TYPE: + case JS_ATOMICS_MUTEX_TYPE: case JS_TEMPORAL_CALENDAR_TYPE: case JS_TEMPORAL_DURATION_TYPE: case JS_TEMPORAL_INSTANT_TYPE: diff --git a/src/diagnostics/objects-debug.cc b/src/diagnostics/objects-debug.cc index 7507adb4c0..8af05226ae 100644 --- a/src/diagnostics/objects-debug.cc +++ b/src/diagnostics/objects-debug.cc @@ -32,6 +32,7 @@ #include "src/objects/instance-type.h" #include "src/objects/js-array-buffer-inl.h" #include 
"src/objects/js-array-inl.h" +#include "src/objects/js-atomics-synchronization-inl.h" #include "src/objects/objects-inl.h" #include "src/objects/objects.h" #include "src/objects/turbofan-types-inl.h" @@ -1239,6 +1240,17 @@ void JSSharedStruct::JSSharedStructVerify(Isolate* isolate) { } } +void JSAtomicsMutex::JSAtomicsMutexVerify(Isolate* isolate) { + CHECK(IsJSAtomicsMutex()); + CHECK(InSharedHeap()); + JSObjectVerify(isolate); + Map mutex_map = map(); + CHECK(mutex_map.InSharedHeap()); + CHECK(mutex_map.GetBackPointer().IsUndefined(isolate)); + CHECK(!mutex_map.is_extensible()); + CHECK(!mutex_map.is_prototype_map()); +} + void WeakCell::WeakCellVerify(Isolate* isolate) { CHECK(IsWeakCell()); diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index 4fffe5630e..fcf80b9450 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1463,6 +1463,16 @@ void JSSharedStruct::JSSharedStructPrint(std::ostream& os) { JSObjectPrintBody(os, *this); } +void JSAtomicsMutex::JSAtomicsMutexPrint(std::ostream& os) { + JSObjectPrintHeader(os, *this, "JSAtomicsMutex"); + Isolate* isolate = GetIsolateFromWritableObject(*this); + os << "\n - isolate: " << isolate; + if (isolate->is_shared()) os << " (shared)"; + os << "\n - state: " << this->state(); + os << "\n - owner_thread_id: " << this->owner_thread_id(); + JSObjectPrintBody(os, *this); +} + void JSWeakMap::JSWeakMapPrint(std::ostream& os) { JSObjectPrintHeader(os, *this, "JSWeakMap"); os << "\n - table: " << Brief(table()); diff --git a/src/execution/isolate.cc b/src/execution/isolate.cc index c2a4c26599..9d78977705 100644 --- a/src/execution/isolate.cc +++ b/src/execution/isolate.cc @@ -5674,5 +5674,26 @@ void Isolate::DetachFromSharedIsolate() { #endif // DEBUG } +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS +ExternalPointer_t Isolate::EncodeWaiterQueueNodeAsExternalPointer( + Address node) { + DCHECK_NE(kNullAddress, node); + Isolate* shared = shared_isolate(); + 
uint32_t index; + ExternalPointer_t ext; + if (waiter_queue_node_external_pointer_.IsJust()) { + ext = waiter_queue_node_external_pointer_.FromJust(); + index = ext >> kExternalPointerIndexShift; + } else { + index = shared->external_pointer_table().Allocate(); + ext = index << kExternalPointerIndexShift; + waiter_queue_node_external_pointer_ = Just(ext); + } + DCHECK_NE(0, index); + shared->external_pointer_table().Set(index, node, kWaiterQueueNodeTag); + return ext; +} +#endif // V8_SANDBOXED_EXTERNAL_POINTERS + } // namespace internal } // namespace v8 diff --git a/src/execution/isolate.h b/src/execution/isolate.h index a1900c4f4a..6ad9e4efe0 100644 --- a/src/execution/isolate.h +++ b/src/execution/isolate.h @@ -1948,6 +1948,12 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { Address external_pointer_table_address() { return reinterpret_cast
(&isolate_data_.external_pointer_table_); } + + Maybe GetWaiterQueueNodeExternalPointer() const { + return waiter_queue_node_external_pointer_; + } + + ExternalPointer_t EncodeWaiterQueueNodeAsExternalPointer(Address node); #endif struct PromiseHookFields { @@ -2411,6 +2417,13 @@ class V8_EXPORT_PRIVATE Isolate final : private HiddenFactory { // isolates or when no shared isolate is used. Isolate* shared_isolate_ = nullptr; +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + // A pointer to Isolate's main thread's WaiterQueueNode. It is used to wait + // for JS-exposed mutex or condition variable. + Maybe waiter_queue_node_external_pointer_ = + Nothing(); +#endif + #if DEBUG // Set to true once during isolate initialization right when attaching to the // shared isolate. If there was no shared isolate given it will still be set diff --git a/src/heap/factory-inl.h b/src/heap/factory-inl.h index c022f12450..7b83ed999f 100644 --- a/src/heap/factory-inl.h +++ b/src/heap/factory-inl.h @@ -58,6 +58,27 @@ Handle Factory::NewJSArrayWithElements(Handle elements, allocation); } +Handle Factory::NewJSObjectFromMap( + Handle map, AllocationType allocation, + Handle allocation_site) { + return NewJSObjectFromMapInternal(map, allocation, allocation_site, + kTaggedAligned); +} + +Handle Factory::NewSystemPointerAlignedJSObjectFromMap( + Handle map, AllocationType allocation, + Handle allocation_site) { + AllocationAlignment alignment; + if (kTaggedSize == kSystemPointerSize) { + alignment = kTaggedAligned; + } else { + DCHECK_EQ(kDoubleSize, kSystemPointerSize); + alignment = kDoubleAligned; + } + return NewJSObjectFromMapInternal(map, allocation, allocation_site, + alignment); +} + Handle Factory::NewFastOrSlowJSObjectFromMap( Handle map, int number_of_slow_properties, AllocationType allocation, Handle allocation_site) { diff --git a/src/heap/factory.cc b/src/heap/factory.cc index 6bcc0f56f9..61f98726dc 100644 --- a/src/heap/factory.cc +++ b/src/heap/factory.cc @@ -339,7 +339,7 @@ 
HeapObject Factory::AllocateRaw(int size, AllocationType allocation, HeapObject Factory::AllocateRawWithAllocationSite( Handle map, AllocationType allocation, - Handle allocation_site) { + Handle allocation_site, AllocationAlignment alignment) { DCHECK(map->instance_type() != MAP_TYPE); int size = map->instance_size(); if (!allocation_site.is_null()) { @@ -347,7 +347,7 @@ HeapObject Factory::AllocateRawWithAllocationSite( size += AllocationMemento::kSize; } HeapObject result = allocator()->AllocateRawWith( - size, allocation); + size, allocation, AllocationOrigin::kRuntime, alignment); WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER; @@ -2545,9 +2545,9 @@ void Factory::InitializeJSObjectBody(JSObject obj, Map map, int start_offset) { } } -Handle Factory::NewJSObjectFromMap( +Handle Factory::NewJSObjectFromMapInternal( Handle map, AllocationType allocation, - Handle allocation_site) { + Handle allocation_site, AllocationAlignment alignment) { // JSFunctions should be allocated using AllocateFunction to be // properly initialized. DCHECK(!InstanceTypeChecker::IsJSFunction((map->instance_type()))); @@ -2556,8 +2556,8 @@ Handle Factory::NewJSObjectFromMap( // AllocateGlobalObject to be properly initialized. DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); - JSObject js_obj = JSObject::cast( - AllocateRawWithAllocationSite(map, allocation, allocation_site)); + JSObject js_obj = JSObject::cast(AllocateRawWithAllocationSite( + map, allocation, allocation_site, alignment)); InitializeJSObjectFromMap(js_obj, *empty_fixed_array(), *map); diff --git a/src/heap/factory.h b/src/heap/factory.h index 28de4ff6ce..d4ccf0b889 100644 --- a/src/heap/factory.h +++ b/src/heap/factory.h @@ -524,7 +524,10 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase { // points to the site. // JS objects are pretenured when allocated by the bootstrapper and // runtime. 
- Handle NewJSObjectFromMap( + inline Handle NewJSObjectFromMap( + Handle map, AllocationType allocation = AllocationType::kYoung, + Handle allocation_site = Handle::null()); + inline Handle NewSystemPointerAlignedJSObjectFromMap( Handle map, AllocationType allocation = AllocationType::kYoung, Handle allocation_site = Handle::null()); // Like NewJSObjectFromMap, but includes allocating a properties dictionary. @@ -1051,7 +1054,12 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase { HeapObject AllocateRawWithAllocationSite( Handle map, AllocationType allocation, - Handle allocation_site); + Handle allocation_site, + AllocationAlignment alignment = kTaggedAligned); + + Handle NewJSObjectFromMapInternal( + Handle map, AllocationType allocation, + Handle allocation_site, AllocationAlignment alignment); Handle NewJSArrayBufferView( Handle map, Handle elements, diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc index a13a3db87e..7e1e7f74cd 100644 --- a/src/heap/mark-compact.cc +++ b/src/heap/mark-compact.cc @@ -1214,6 +1214,8 @@ class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor { // alive other code objects reachable through the weak list but they should // keep alive its embedded pointers (which would otherwise be dropped). // - Prefix of the string table. +// - If V8_SANDBOXED_EXTERNAL_POINTERS, client Isolates' waiter queue node +// ExternalPointer_t in shared Isolates. class MarkCompactCollector::CustomRootBodyMarkingVisitor final : public ObjectVisitorWithCageBases { public: @@ -2090,6 +2092,17 @@ void MarkCompactCollector::MarkObjectsFromClientHeaps() { obj = iterator.Next()) { obj.IterateFast(cage_base, &visitor); } + +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + // Custom marking for the external pointer table entry used to hold + // client Isolates' WaiterQueueNode, which is used by JS mutexes and + // condition variables. 
+ ExternalPointer_t waiter_queue_ext; + if (client->GetWaiterQueueNodeExternalPointer().To(&waiter_queue_ext)) { + uint32_t index = waiter_queue_ext >> kExternalPointerIndexShift; + client->shared_isolate()->external_pointer_table().Mark(index); + } +#endif // V8_SANDBOXED_EXTERNAL_POINTERS }); } diff --git a/src/init/bootstrapper.cc b/src/init/bootstrapper.cc index cd102522fb..9040e95202 100644 --- a/src/init/bootstrapper.cc +++ b/src/init/bootstrapper.cc @@ -41,6 +41,7 @@ #endif // V8_INTL_SUPPORT #include "src/objects/js-array-buffer-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-atomics-synchronization.h" #ifdef V8_INTL_SUPPORT #include "src/objects/js-break-iterator.h" #include "src/objects/js-collator.h" @@ -515,6 +516,33 @@ V8_NOINLINE Handle InstallFunction( instance_size, inobject_properties, prototype, call); } +V8_NOINLINE Handle CreateSharedObjectConstructor( + Isolate* isolate, Handle name, InstanceType type, int instance_size, + Builtin builtin) { + Factory* factory = isolate->factory(); + Handle info = factory->NewSharedFunctionInfoForBuiltin( + name, builtin, FunctionKind::kNormalFunction); + info->set_language_mode(LanguageMode::kStrict); + Handle constructor = + Factory::JSFunctionBuilder{isolate, info, isolate->native_context()} + .set_map(isolate->strict_function_map()) + .Build(); + constexpr int in_object_properties = 0; + Handle instance_map = + factory->NewMap(type, instance_size, TERMINAL_FAST_ELEMENTS_KIND, + in_object_properties, AllocationType::kSharedMap); + // Shared objects have fixed layout ahead of time, so there's no slack. + instance_map->SetInObjectUnusedPropertyFields(0); + // Shared objects are not extensible and have a null prototype. + instance_map->set_is_extensible(false); + JSFunction::SetInitialMap(isolate, constructor, instance_map, + factory->null_value()); + // The constructor itself is not a shared object, so the shared map should not + // point to it. 
+ instance_map->set_constructor_or_back_pointer(*factory->null_value()); + return constructor; +} + // This sets a constructor instance type on the constructor map which will be // used in IsXxxConstructor() predicates. Having such predicates helps figuring // out if a protector cell should be invalidated. If there are no protector @@ -4606,6 +4634,26 @@ void Genesis::InitializeGlobal_harmony_struct() { shared_struct_type_fun->shared().set_length(1); JSObject::AddProperty(isolate(), global, "SharedStructType", shared_struct_type_fun, DONT_ENUM); + + { // Atomics.Mutex + // TODO(syg): Make a single canonical copy of the map. + Handle mutex_str = + isolate()->factory()->InternalizeUtf8String("Mutex"); + Handle mutex_fun = CreateSharedObjectConstructor( + isolate(), mutex_str, JS_ATOMICS_MUTEX_TYPE, + JSAtomicsMutex::kHeaderSize, Builtin::kAtomicsMutexConstructor); + mutex_fun->shared().set_internal_formal_parameter_count( + JSParameterCount(0)); + mutex_fun->shared().set_length(0); + native_context()->set_js_atomics_mutex_map(mutex_fun->initial_map()); + JSObject::AddProperty(isolate(), isolate()->atomics_object(), mutex_str, + mutex_fun, DONT_ENUM); + + SimpleInstallFunction(isolate(), mutex_fun, "lock", + Builtin::kAtomicsMutexLock, 2, true); + SimpleInstallFunction(isolate(), mutex_fun, "tryLock", + Builtin::kAtomicsMutexTryLock, 2, true); + } } void Genesis::InitializeGlobal_harmony_array_find_last() { diff --git a/src/objects/all-objects-inl.h b/src/objects/all-objects-inl.h index d4ca90272e..70f4e7abaa 100644 --- a/src/objects/all-objects-inl.h +++ b/src/objects/all-objects-inl.h @@ -39,6 +39,7 @@ #include "src/objects/instance-type-inl.h" #include "src/objects/js-array-buffer-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-atomics-synchronization-inl.h" #include "src/objects/js-collection-inl.h" #include "src/objects/js-function-inl.h" #include "src/objects/js-generator-inl.h" diff --git a/src/objects/contexts.h b/src/objects/contexts.h 
index 447142ff2a..ba14db0495 100644 --- a/src/objects/contexts.h +++ b/src/objects/contexts.h @@ -176,6 +176,7 @@ enum ContextLookupFlags { js_array_packed_double_elements_map) \ V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \ js_array_holey_double_elements_map) \ + V(JS_ATOMICS_MUTEX_MAP, Map, js_atomics_mutex_map) \ V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \ V(JS_MAP_MAP_INDEX, Map, js_map_map) \ V(JS_MODULE_NAMESPACE_MAP, Map, js_module_namespace_map) \ diff --git a/src/objects/js-atomics-synchronization-inl.h b/src/objects/js-atomics-synchronization-inl.h new file mode 100644 index 0000000000..dc3efa776f --- /dev/null +++ b/src/objects/js-atomics-synchronization-inl.h @@ -0,0 +1,115 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_ +#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_ + +#include "src/common/assert-scope.h" +#include "src/common/globals.h" +#include "src/heap/heap-write-barrier-inl.h" +#include "src/objects/js-atomics-synchronization.h" +#include "src/objects/objects-inl.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +#include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc" + +TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex) + +CAST_ACCESSOR(JSAtomicsMutex) + +// static +void JSAtomicsMutex::Lock(Isolate* requester, Handle mutex) { + DisallowGarbageCollection no_gc; + // First try to lock an uncontended mutex, which should be the common case. If + // this fails, then go to the slow path to possibly put the current thread to + // sleep. + // + // The fast path is done using a weak CAS which may fail spuriously on + // architectures with load-link/store-conditional instructions. 
+ std::atomic* state = mutex->AtomicStatePtr(); + StateT expected = kUnlocked; + if (V8_UNLIKELY(!state->compare_exchange_weak(expected, kLockedUncontended, + std::memory_order_acquire, + std::memory_order_relaxed))) { + LockSlowPath(requester, mutex, state); + } + mutex->SetCurrentThreadAsOwner(); +} + +bool JSAtomicsMutex::TryLock() { + DisallowGarbageCollection no_gc; + StateT expected = kUnlocked; + if (V8_LIKELY(AtomicStatePtr()->compare_exchange_strong( + expected, kLockedUncontended, std::memory_order_acquire, + std::memory_order_relaxed))) { + SetCurrentThreadAsOwner(); + return true; + } + return false; +} + +void JSAtomicsMutex::Unlock(Isolate* requester) { + DisallowGarbageCollection no_gc; + // First try to unlock an uncontended mutex, which should be the common + // case. If this fails, then go to the slow path to wake a waiting thread. + // + // In contrast to Lock, the fast path is done using a strong CAS which does + // not fail spuriously. This simplifies the slow path by guaranteeing that + // there is at least one waiter to be notified. 
+ DCHECK(IsCurrentThreadOwner()); + ClearOwnerThread(); + std::atomic* state = AtomicStatePtr(); + StateT expected = kLockedUncontended; + if (V8_LIKELY(state->compare_exchange_strong(expected, kUnlocked, + std::memory_order_release, + std::memory_order_relaxed))) { + return; + } + UnlockSlowPath(requester, state); +} + +bool JSAtomicsMutex::IsHeld() { + return AtomicStatePtr()->load(std::memory_order_relaxed) & kIsLockedBit; +} + +bool JSAtomicsMutex::IsCurrentThreadOwner() { + bool result = AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed) == + ThreadId::Current().ToInteger(); + DCHECK_IMPLIES(result, IsHeld()); + return result; +} + +void JSAtomicsMutex::SetCurrentThreadAsOwner() { + AtomicOwnerThreadIdPtr()->store(ThreadId::Current().ToInteger(), + std::memory_order_relaxed); +} + +void JSAtomicsMutex::ClearOwnerThread() { + AtomicOwnerThreadIdPtr()->store(ThreadId::Invalid().ToInteger(), + std::memory_order_relaxed); +} + +std::atomic* JSAtomicsMutex::AtomicStatePtr() { + StateT* state_ptr = reinterpret_cast(field_address(kStateOffset)); + DCHECK(IsAligned(reinterpret_cast(state_ptr), sizeof(StateT))); + return base::AsAtomicPtr(state_ptr); +} + +std::atomic* JSAtomicsMutex::AtomicOwnerThreadIdPtr() { + int32_t* owner_thread_id_ptr = + reinterpret_cast(field_address(kOwnerThreadIdOffset)); + return base::AsAtomicPtr(owner_thread_id_ptr); +} + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_INL_H_ diff --git a/src/objects/js-atomics-synchronization.cc b/src/objects/js-atomics-synchronization.cc new file mode 100644 index 0000000000..d3851f60fb --- /dev/null +++ b/src/objects/js-atomics-synchronization.cc @@ -0,0 +1,274 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/objects/js-atomics-synchronization.h" + +#include "src/base/macros.h" +#include "src/base/platform/condition-variable.h" +#include "src/base/platform/mutex.h" +#include "src/base/platform/yield-processor.h" +#include "src/execution/isolate-inl.h" +#include "src/heap/parked-scope.h" +#include "src/objects/js-atomics-synchronization-inl.h" +#include "src/sandbox/external-pointer-inl.h" + +namespace v8 { +namespace internal { + +namespace detail { + +// To manage waiting threads, there is a process-wide doubly-linked intrusive +// list per waiter (i.e. mutex or condition variable). There is a per-thread +// node allocated on the stack when the thread goes to sleep during waiting. In +// the case of sandboxed pointers, the access to the on-stack node is indirected +// through the shared Isolate's external pointer table. +class V8_NODISCARD WaiterQueueNode final { + public: + explicit WaiterQueueNode(Isolate* requester) +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + : external_ptr_to_this(requester->EncodeWaiterQueueNodeAsExternalPointer( + reinterpret_cast
(this))) +#endif + { + } + + template + static typename T::StateT EncodeHead(WaiterQueueNode* head) { +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + if (head == nullptr) return 0; + auto state = static_cast(head->external_ptr_to_this); +#else + auto state = base::bit_cast(head); +#endif // V8_SANDBOXED_EXTERNAL_POINTERS + + DCHECK_EQ(0, state & T::kLockBitsMask); + return state; + } + + template + static WaiterQueueNode* DecodeHead(Isolate* requester, + typename T::StateT state) { +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + Isolate* shared_isolate = requester->shared_isolate(); + ExternalPointer_t ptr = + static_cast(state & T::kWaiterQueueHeadMask); + if (ptr == 0) return nullptr; + return reinterpret_cast( + DecodeExternalPointer(shared_isolate, ptr, kWaiterQueueNodeTag)); +#else + return base::bit_cast(state & T::kWaiterQueueHeadMask); +#endif // V8_SANDBOXED_EXTERNAL_POINTERS + } + + // Enqueues {new_tail}, mutating {head} to be the new head. + static void Enqueue(WaiterQueueNode** head, WaiterQueueNode* new_tail) { + DCHECK_NOT_NULL(head); + WaiterQueueNode* current_head = *head; + if (current_head == nullptr) { + new_tail->next_ = new_tail; + new_tail->prev_ = new_tail; + *head = new_tail; + } else { + WaiterQueueNode* current_tail = current_head->prev_; + current_tail->next_ = new_tail; + current_head->prev_ = new_tail; + new_tail->next_ = current_head; + new_tail->prev_ = current_tail; + } + } + + // Dequeues a waiter and returns it; {head} is mutated to be the new + // head. 
+ static WaiterQueueNode* Dequeue(WaiterQueueNode** head) { + DCHECK_NOT_NULL(head); + DCHECK_NOT_NULL(*head); + WaiterQueueNode* current_head = *head; + WaiterQueueNode* new_head = current_head->next_; + if (new_head == current_head) { + *head = nullptr; + } else { + WaiterQueueNode* tail = current_head->prev_; + new_head->prev_ = tail; + tail->next_ = new_head; + *head = new_head; + } + return current_head; + } + + void Wait(Isolate* requester) { + AllowGarbageCollection allow_before_parking; + ParkedScope parked_scope(requester->main_thread_local_heap()); + base::MutexGuard guard(&wait_lock_); + while (should_wait) { + wait_cond_var_.Wait(&wait_lock_); + } + } + + void Notify() { + base::MutexGuard guard(&wait_lock_); + should_wait = false; + wait_cond_var_.NotifyOne(); + } + + bool should_wait = false; + +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + const ExternalPointer_t external_ptr_to_this; +#endif // V8_SANDBOXED_EXTERNAL_POINTERS + + private: + // The queue wraps around, e.g. the head's prev is the tail, and the tail's + // next is the head. + WaiterQueueNode* next_ = nullptr; + WaiterQueueNode* prev_ = nullptr; + + base::Mutex wait_lock_; + base::ConditionVariable wait_cond_var_; +}; + +} // namespace detail + +using detail::WaiterQueueNode; + +// static +Handle JSAtomicsMutex::Create(Isolate* isolate) { + auto* factory = isolate->factory(); + Handle map = isolate->js_atomics_mutex_map(); + Handle mutex = Handle::cast( + factory->NewSystemPointerAlignedJSObjectFromMap( + map, AllocationType::kSharedOld)); + mutex->set_state(kUnlocked); + mutex->set_owner_thread_id(ThreadId::Invalid().ToInteger()); + return mutex; +} + +bool JSAtomicsMutex::TryLockExplicit(std::atomic* state, + StateT& expected) { + // Try to lock a possibly contended mutex. 
+ expected &= ~kIsLockedBit; + return state->compare_exchange_weak(expected, expected | kIsLockedBit, + std::memory_order_acquire, + std::memory_order_relaxed); +} + +bool JSAtomicsMutex::TryLockWaiterQueueExplicit(std::atomic<StateT>* state, + StateT& expected) { + // The queue lock can only be acquired on a locked mutex. + DCHECK(expected & kIsLockedBit); + // Try to acquire the queue lock. + expected &= ~kIsWaiterQueueLockedBit; + return state->compare_exchange_weak( + expected, expected | kIsWaiterQueueLockedBit, std::memory_order_acquire, + std::memory_order_relaxed); +} + +// static +void JSAtomicsMutex::LockSlowPath(Isolate* requester, + Handle<JSAtomicsMutex> mutex, + std::atomic<StateT>* state) { + for (;;) { + // Spin for a little bit to try to acquire the lock, so as to be fast under + // microcontention. + // + // The backoff algorithm is copied from PartitionAlloc's SpinningMutex. + constexpr int kSpinCount = 64; + constexpr int kMaxBackoff = 16; + + int tries = 0; + int backoff = 1; + StateT current_state = state->load(std::memory_order_relaxed); + do { + if (mutex->TryLockExplicit(state, current_state)) return; + + for (int yields = 0; yields < backoff; yields++) { + YIELD_PROCESSOR; + tries++; + } + + backoff = std::min(kMaxBackoff, backoff << 1); + } while (tries < kSpinCount); + + // At this point the lock is considered contended, so try to go to sleep and + // put the requester thread on the waiter queue. + + // Allocate a waiter queue node on-stack, since this thread is going to + // sleep and will be blocked anyway. + WaiterQueueNode this_waiter(requester); + + { + // Try to acquire the queue lock, which is itself a spinlock. + current_state = state->load(std::memory_order_relaxed); + for (;;) { + if ((current_state & kIsLockedBit) && + mutex->TryLockWaiterQueueExplicit(state, current_state)) { + break; + } + // Also check for the lock having been released by another thread during + // attempts to acquire the queue lock. 
+ if (mutex->TryLockExplicit(state, current_state)) return; + YIELD_PROCESSOR; + } + + // With the queue lock held, enqueue the requester onto the waiter queue. + this_waiter.should_wait = true; + WaiterQueueNode* waiter_head = + WaiterQueueNode::DecodeHead(requester, current_state); + WaiterQueueNode::Enqueue(&waiter_head, &this_waiter); + + // Release the queue lock and install the new waiter queue head by + // creating a new state. + DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit); + StateT new_state = + WaiterQueueNode::EncodeHead(waiter_head); + // The lock is held, just not by us, so don't set the current thread id as + // the owner. + DCHECK(current_state & kIsLockedBit); + DCHECK(!mutex->IsCurrentThreadOwner()); + new_state |= kIsLockedBit; + state->store(new_state, std::memory_order_release); + } + + // Wait for another thread to release the lock and wake us up. + this_waiter.Wait(requester); + + // Reload the state pointer after wake up in case of shared GC while + // blocked. + state = mutex->AtomicStatePtr(); + + // After wake up we try to acquire the lock again by spinning, as the + // contention at the point of going to sleep should not be correlated with + // contention at the point of waking up. + } +} + +void JSAtomicsMutex::UnlockSlowPath(Isolate* requester, + std::atomic* state) { + // The fast path unconditionally cleared the owner thread. + DCHECK_EQ(ThreadId::Invalid().ToInteger(), + AtomicOwnerThreadIdPtr()->load(std::memory_order_relaxed)); + + // To wake a sleeping thread, first acquire the queue lock, which is itself + // a spinlock. + StateT current_state = state->load(std::memory_order_relaxed); + while (!TryLockWaiterQueueExplicit(state, current_state)) { + YIELD_PROCESSOR; + } + + // Get the waiter queue head, which is guaranteed to be non-null because the + // unlock fast path uses a strong CAS which does not allow spurious + // failure. This is unlike the lock fast path, which uses a weak CAS. 
+ WaiterQueueNode* waiter_head = + WaiterQueueNode::DecodeHead(requester, current_state); + WaiterQueueNode* old_head = WaiterQueueNode::Dequeue(&waiter_head); + + // Release both the lock and the queue lock and also install the new waiter + // queue head by creating a new state. + StateT new_state = WaiterQueueNode::EncodeHead(waiter_head); + state->store(new_state, std::memory_order_release); + + old_head->Notify(); +} + +} // namespace internal +} // namespace v8 diff --git a/src/objects/js-atomics-synchronization.h b/src/objects/js-atomics-synchronization.h new file mode 100644 index 0000000000..fd5a8b1129 --- /dev/null +++ b/src/objects/js-atomics-synchronization.h @@ -0,0 +1,117 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_ +#define V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_ + +#include + +#include "src/execution/thread-id.h" +#include "src/objects/js-objects.h" + +// Has to be the last include (doesn't have include guards): +#include "src/objects/object-macros.h" + +namespace v8 { +namespace internal { + +#include "torque-generated/src/objects/js-atomics-synchronization-tq.inc" + +namespace detail { +class WaiterQueueNode; +} // namespace detail + +// A non-recursive mutex that is exposed to JS. +// +// It has the following properties: +// - Slim: 8-12 bytes. Lock state is 4 bytes when +// V8_SANDBOXED_EXTERNAL_POINTERS, and sizeof(void*) otherwise. Owner +// thread is an additional 4 bytes. +// - Fast when uncontended: a single weak CAS. +// - Possibly unfair under contention. +// - Moving GC safe. It uses an index into the shared Isolate's external +// pointer table to store a queue of sleeping threads. +// - Parks the main thread LocalHeap when the thread is blocked on acquiring +// the lock. Unparks the main thread LocalHeap when unblocked. 
This means +// that the lock can only be used with main thread isolates (including +// workers) but not with helper threads that have their own LocalHeap. +// +// This mutex manages its own queue of waiting threads under contention, i.e. a +// it implements a futex in userland. The algorithm is inspired by WebKit's +// ParkingLot. +class JSAtomicsMutex + : public TorqueGeneratedJSAtomicsMutex { + public: + DECL_CAST(JSAtomicsMutex) + DECL_PRINTER(JSAtomicsMutex) + EXPORT_DECL_VERIFIER(JSAtomicsMutex) + + V8_EXPORT_PRIVATE static Handle Create(Isolate* isolate); + + // Lock the mutex, blocking if it's currently owned by another thread. + static inline void Lock(Isolate* requester, Handle mutex); + + V8_WARN_UNUSED_RESULT inline bool TryLock(); + + inline void Unlock(Isolate* requester); + + inline bool IsHeld(); + inline bool IsCurrentThreadOwner(); + + static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize; + class BodyDescriptor; + + TQ_OBJECT_CONSTRUCTORS(JSAtomicsMutex) + + private: + friend class detail::WaiterQueueNode; + + // There are 2 lock bits: whether the lock itself is locked, and whether the + // associated waiter queue is locked. 
+ static constexpr int kIsLockedBit = 1 << 0; + static constexpr int kIsWaiterQueueLockedBit = 1 << 1; + static constexpr int kLockBitsSize = 2; + +#ifdef V8_SANDBOXED_EXTERNAL_POINTERS + using StateT = uint32_t; + static_assert(sizeof(StateT) == kExternalPointerSize); +#else + using StateT = uintptr_t; +#endif + + static constexpr StateT kUnlocked = 0; + static constexpr StateT kLockedUncontended = 1; + + static constexpr StateT kLockBitsMask = (1 << kLockBitsSize) - 1; + static constexpr StateT kWaiterQueueHeadMask = ~kLockBitsMask; + + inline void SetCurrentThreadAsOwner(); + inline void ClearOwnerThread(); + + inline std::atomic* AtomicStatePtr(); + inline std::atomic* AtomicOwnerThreadIdPtr(); + + bool TryLockExplicit(std::atomic* state, StateT& expected); + bool TryLockWaiterQueueExplicit(std::atomic* state, StateT& expected); + + V8_EXPORT_PRIVATE static void LockSlowPath(Isolate* requester, + Handle mutex, + std::atomic* state); + V8_EXPORT_PRIVATE void UnlockSlowPath(Isolate* requester, + std::atomic* state); + + using TorqueGeneratedJSAtomicsMutex::state; + using TorqueGeneratedJSAtomicsMutex::set_state; + using TorqueGeneratedJSAtomicsMutex::owner_thread_id; + using TorqueGeneratedJSAtomicsMutex::set_owner_thread_id; +}; + +} // namespace internal +} // namespace v8 + +#include "src/objects/object-macros-undef.h" + +#endif // V8_OBJECTS_JS_ATOMICS_SYNCHRONIZATION_H_ diff --git a/src/objects/js-atomics-synchronization.tq b/src/objects/js-atomics-synchronization.tq new file mode 100644 index 0000000000..1419874155 --- /dev/null +++ b/src/objects/js-atomics-synchronization.tq @@ -0,0 +1,15 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +extern class JSAtomicsMutex extends JSObject { + // owner_thread_id must come first to ensure that the state field is + // uintptr-aligned. 
+ @if(V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES) + owner_thread_id: intptr; + @ifnot(V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES) + owner_thread_id: int32; + + @if(V8_SANDBOXED_EXTERNAL_POINTERS) state: uint32; + @ifnot(V8_SANDBOXED_EXTERNAL_POINTERS) state: uintptr; +} diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index 9db3629f15..917285b03b 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -30,6 +30,7 @@ #include "src/objects/heap-object.h" #include "src/objects/js-array-buffer-inl.h" #include "src/objects/js-array-inl.h" +#include "src/objects/js-atomics-synchronization.h" #include "src/objects/lookup.h" #include "src/objects/map-updater.h" #include "src/objects/objects-inl.h" @@ -2460,6 +2461,8 @@ int JSObject::GetHeaderSize(InstanceType type, return JSModuleNamespace::kHeaderSize; case JS_SHARED_STRUCT_TYPE: return JSSharedStruct::kHeaderSize; + case JS_ATOMICS_MUTEX_TYPE: + return JSAtomicsMutex::kHeaderSize; case JS_TEMPORAL_CALENDAR_TYPE: return JSTemporalCalendar::kHeaderSize; case JS_TEMPORAL_DURATION_TYPE: diff --git a/src/objects/map.cc b/src/objects/map.cc index cf2a4c73f0..967ba60519 100644 --- a/src/objects/map.cc +++ b/src/objects/map.cc @@ -281,6 +281,7 @@ VisitorId Map::GetVisitorId(Map map) { case JS_SET_VALUE_ITERATOR_TYPE: case JS_SHADOW_REALM_TYPE: case JS_SHARED_STRUCT_TYPE: + case JS_ATOMICS_MUTEX_TYPE: case JS_STRING_ITERATOR_PROTOTYPE_TYPE: case JS_STRING_ITERATOR_TYPE: case JS_TEMPORAL_CALENDAR_TYPE: diff --git a/src/objects/object-list-macros.h b/src/objects/object-list-macros.h index 6c98766f29..208b8fe68f 100644 --- a/src/objects/object-list-macros.h +++ b/src/objects/object-list-macros.h @@ -131,6 +131,7 @@ class ZoneForwardList; V(JSAsyncFromSyncIterator) \ V(JSAsyncFunctionObject) \ V(JSAsyncGeneratorObject) \ + V(JSAtomicsMutex) \ V(JSBoundFunction) \ V(JSCollection) \ V(JSCollectionIterator) \ diff --git a/src/objects/objects-body-descriptors-inl.h 
b/src/objects/objects-body-descriptors-inl.h index edd5445b86..d236b71a54 100644 --- a/src/objects/objects-body-descriptors-inl.h +++ b/src/objects/objects-body-descriptors-inl.h @@ -20,6 +20,7 @@ #include "src/objects/free-space-inl.h" #include "src/objects/hash-table.h" #include "src/objects/heap-number.h" +#include "src/objects/js-atomics-synchronization.h" #include "src/objects/js-collection.h" #include "src/objects/js-weak-refs.h" #include "src/objects/literal-objects.h" @@ -663,6 +664,26 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase { } }; +class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase { + public: + static bool IsValidSlot(Map map, HeapObject obj, int offset) { + if (offset < kEndOfTaggedFieldsOffset) return true; + if (offset < kHeaderSize) return false; + return IsValidJSObjectSlotImpl(map, obj, offset); + } + + template + static inline void IterateBody(Map map, HeapObject obj, int object_size, + ObjectVisitor* v) { + IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v); + IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v); + } + + static inline int SizeOf(Map map, HeapObject object) { + return map.instance_size(); + } +}; + class Foreign::BodyDescriptor final : public BodyDescriptorBase { public: static bool IsValidSlot(Map map, HeapObject obj, int offset) { return false; } @@ -1254,6 +1275,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... 
args) { return CALL_APPLY(JSWeakRef); case JS_PROXY_TYPE: return CALL_APPLY(JSProxy); + case JS_ATOMICS_MUTEX_TYPE: + return CALL_APPLY(JSAtomicsMutex); case FOREIGN_TYPE: return CALL_APPLY(Foreign); case MAP_TYPE: diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h index 6d431b63a9..4dfde12920 100644 --- a/src/objects/objects-inl.h +++ b/src/objects/objects-inl.h @@ -1180,6 +1180,7 @@ bool Object::IsShared() const { case SHARED_STRING_TYPE: case SHARED_ONE_BYTE_STRING_TYPE: case JS_SHARED_STRUCT_TYPE: + case JS_ATOMICS_MUTEX_TYPE: DCHECK(object.InSharedHeap()); return true; case INTERNALIZED_STRING_TYPE: diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index 37de24f0da..56dfb9d997 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -593,6 +593,8 @@ Maybe ValueSerializer::WriteJSReceiver(Handle receiver) { return WriteJSError(Handle::cast(receiver)); case JS_SHARED_STRUCT_TYPE: return WriteJSSharedStruct(Handle::cast(receiver)); + case JS_ATOMICS_MUTEX_TYPE: + return WriteSharedObject(receiver); #if V8_ENABLE_WEBASSEMBLY case WASM_MODULE_OBJECT_TYPE: return WriteWasmModule(Handle::cast(receiver)); diff --git a/src/torque/torque-parser.cc b/src/torque/torque-parser.cc index 5670619327..447954cdd1 100644 --- a/src/torque/torque-parser.cc +++ b/src/torque/torque-parser.cc @@ -67,6 +67,10 @@ class BuildFlags : public ContextualClass { #else build_flags_["V8_ENABLE_WEBASSEMBLY"] = false; #endif + build_flags_["V8_SANDBOXED_EXTERNAL_POINTERS"] = + V8_SANDBOXED_EXTERNAL_POINTERS_BOOL; + build_flags_["V8_NOT_SANDBOXED_EXTERNAL_POINTERS_AND_TAGGED_SIZE_8_BYTES"] = + !V8_SANDBOXED_EXTERNAL_POINTERS_BOOL && TAGGED_SIZE_8_BYTES; build_flags_["DEBUG"] = DEBUG_BOOL; } static bool GetFlag(const std::string& name, const char* production) { diff --git a/test/mjsunit/shared-memory/mutex-workers.js b/test/mjsunit/shared-memory/mutex-workers.js new file mode 100644 index 0000000000..c2986b9d0d --- /dev/null 
+++ b/test/mjsunit/shared-memory/mutex-workers.js @@ -0,0 +1,45 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --harmony-struct --allow-natives-syntax + +"use strict"; + +if (this.Worker) { + +(function TestMutexWorkers() { + let workerScript = + `onmessage = function(msg) { + let mutex = msg.mutex; + let box = msg.box; + for (let i = 0; i < 10; i++) { + Atomics.Mutex.lock(mutex, function() { + box.counter++; + }); + } + postMessage("done"); + }; + postMessage("started");`; + + let worker1 = new Worker(workerScript, { type: 'string' }); + let worker2 = new Worker(workerScript, { type: 'string' }); + assertEquals("started", worker1.getMessage()); + assertEquals("started", worker2.getMessage()); + + let Box = new SharedStructType(['counter']); + let box = new Box(); + box.counter = 0; + let mutex = new Atomics.Mutex(); + let msg = { mutex, box }; + worker1.postMessage(msg); + worker2.postMessage(msg); + assertEquals("done", worker1.getMessage()); + assertEquals("done", worker2.getMessage()); + assertEquals(20, box.counter); + + worker1.terminate(); + worker2.terminate(); +})(); + +} diff --git a/test/mjsunit/shared-memory/mutex.js b/test/mjsunit/shared-memory/mutex.js new file mode 100644 index 0000000000..49bcb3b5bf --- /dev/null +++ b/test/mjsunit/shared-memory/mutex.js @@ -0,0 +1,33 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --harmony-struct + +let mutex = new Atomics.Mutex; +let locked_count = 0; + +assertEquals(42, Atomics.Mutex.lock(mutex, () => { + locked_count++; return 42; +})); +assertEquals(locked_count, 1); + +// tryLock returns true when successful. 
+assertTrue(Atomics.Mutex.tryLock(mutex, () => { locked_count++; })); +assertEquals(locked_count, 2); + +// Recursively locking throws. +Atomics.Mutex.lock(mutex, () => { + locked_count++; + assertThrows(() => { + Atomics.Mutex.lock(mutex, () => { throw "unreachable"; }); + }, Error); +}); +assertEquals(locked_count, 3); + +// Recursive tryLock'ing returns false. +Atomics.Mutex.lock(mutex, () => { + locked_count++; + assertFalse(Atomics.Mutex.tryLock(mutex, () => { throw "unreachable"; })); +}); +assertEquals(locked_count, 4); diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index 15079c9191..9e718e703f 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -384,6 +384,7 @@ v8_source_set("unittests_sources") { "interpreter/constant-array-builder-unittest.cc", "interpreter/interpreter-assembler-unittest.cc", "interpreter/interpreter-assembler-unittest.h", + "js-atomics/js-atomics-mutex-unittest.cc", "libplatform/default-job-unittest.cc", "libplatform/default-platform-unittest.cc", "libplatform/default-worker-threads-task-runner-unittest.cc", diff --git a/test/unittests/js-atomics/js-atomics-mutex-unittest.cc b/test/unittests/js-atomics/js-atomics-mutex-unittest.cc new file mode 100644 index 0000000000..a6ece40fe6 --- /dev/null +++ b/test/unittests/js-atomics/js-atomics-mutex-unittest.cc @@ -0,0 +1,116 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#include "src/base/platform/platform.h" +#include "src/base/platform/semaphore.h" +#include "src/base/platform/time.h" +#include "src/objects/js-atomics-synchronization-inl.h" +#include "test/unittests/test-utils.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace v8 { +namespace internal { + +using JSAtomicsMutexTest = TestWithSharedIsolate; + +namespace { + +class ClientIsolateWithContextWrapper final { + public: + explicit ClientIsolateWithContextWrapper(v8::Isolate* shared_isolate) + : client_isolate_wrapper_(kNoCounters, kClientIsolate, shared_isolate), + isolate_scope_(client_isolate_wrapper_.isolate()), + handle_scope_(client_isolate_wrapper_.isolate()), + context_(v8::Context::New(client_isolate_wrapper_.isolate())), + context_scope_(context_) {} + + v8::Isolate* v8_isolate() const { return client_isolate_wrapper_.isolate(); } + Isolate* isolate() const { return reinterpret_cast(v8_isolate()); } + + private: + IsolateWrapper client_isolate_wrapper_; + v8::Isolate::Scope isolate_scope_; + v8::HandleScope handle_scope_; + v8::Local context_; + v8::Context::Scope context_scope_; +}; + +class LockingThread final : public v8::base::Thread { + public: + LockingThread(v8::Isolate* shared_isolate, Handle mutex, + base::Semaphore* sema_ready, + base::Semaphore* sema_execute_start, + base::Semaphore* sema_execute_complete) + : Thread(Options("ThreadWithAtomicsMutex")), + shared_isolate_(shared_isolate), + mutex_(mutex), + sema_ready_(sema_ready), + sema_execute_start_(sema_execute_start), + sema_execute_complete_(sema_execute_complete) {} + + void Run() override { + ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_); + Isolate* isolate = client_isolate_wrapper.isolate(); + + sema_ready_->Signal(); + sema_execute_start_->Wait(); + + HandleScope scope(isolate); + JSAtomicsMutex::Lock(isolate, mutex_); + EXPECT_TRUE(mutex_->IsHeld()); + EXPECT_TRUE(mutex_->IsCurrentThreadOwner()); + 
base::OS::Sleep(base::TimeDelta::FromMilliseconds(1)); + mutex_->Unlock(isolate); + + sema_execute_complete_->Signal(); + } + + protected: + v8::Isolate* shared_isolate_; + Handle mutex_; + base::Semaphore* sema_ready_; + base::Semaphore* sema_execute_start_; + base::Semaphore* sema_execute_complete_; +}; + +} // namespace + +TEST_F(JSAtomicsMutexTest, Contention) { + if (!ReadOnlyHeap::IsReadOnlySpaceShared()) return; + if (!COMPRESS_POINTERS_IN_SHARED_CAGE_BOOL) return; + + FLAG_harmony_struct = true; + + v8::Isolate* shared_isolate = v8_isolate(); + ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate); + + constexpr int kThreads = 32; + + Handle contended_mutex = + JSAtomicsMutex::Create(client_isolate_wrapper.isolate()); + base::Semaphore sema_ready(0); + base::Semaphore sema_execute_start(0); + base::Semaphore sema_execute_complete(0); + std::vector> threads; + for (int i = 0; i < kThreads; i++) { + auto thread = std::make_unique( + shared_isolate, contended_mutex, &sema_ready, &sema_execute_start, + &sema_execute_complete); + CHECK(thread->Start()); + threads.push_back(std::move(thread)); + } + + for (int i = 0; i < kThreads; i++) sema_ready.Wait(); + for (int i = 0; i < kThreads; i++) sema_execute_start.Signal(); + for (int i = 0; i < kThreads; i++) sema_execute_complete.Wait(); + + for (auto& thread : threads) { + thread->Join(); + } + + EXPECT_FALSE(contended_mutex->IsHeld()); +} + +} // namespace internal +} // namespace v8 diff --git a/test/unittests/test-utils.cc b/test/unittests/test-utils.cc index 2f6fc83f7a..c3e4b993a8 100644 --- a/test/unittests/test-utils.cc +++ b/test/unittests/test-utils.cc @@ -22,7 +22,9 @@ namespace { CounterMap* kCurrentCounterMap = nullptr; } // namespace -IsolateWrapper::IsolateWrapper(CountersMode counters_mode) +IsolateWrapper::IsolateWrapper(CountersMode counters_mode, + IsolateSharedMode shared_mode, + v8::Isolate* shared_isolate_if_client) : array_buffer_allocator_( 
v8::ArrayBuffer::Allocator::NewDefaultAllocator()) { CHECK_NULL(kCurrentCounterMap); @@ -46,7 +48,17 @@ IsolateWrapper::IsolateWrapper(CountersMode counters_mode) }; } - isolate_ = v8::Isolate::New(create_params); + if (shared_mode == kSharedIsolate) { + isolate_ = reinterpret_cast( + internal::Isolate::NewShared(create_params)); + } else { + if (shared_mode == kClientIsolate) { + CHECK_NOT_NULL(shared_isolate_if_client); + create_params.experimental_attach_to_shared_isolate = + shared_isolate_if_client; + } + isolate_ = v8::Isolate::New(create_params); + } CHECK_NOT_NULL(isolate()); } diff --git a/test/unittests/test-utils.h b/test/unittests/test-utils.h index dd8b770d6f..761131234e 100644 --- a/test/unittests/test-utils.h +++ b/test/unittests/test-utils.h @@ -59,10 +59,18 @@ using CounterMap = std::map; enum CountersMode { kNoCounters, kEnableCounters }; +enum IsolateSharedMode { kStandaloneIsolate, kSharedIsolate, kClientIsolate }; + // RAII-like Isolate instance wrapper. +// +// It is the caller's responsibility to ensure that the shared Isolate outlives +// all client Isolates. class IsolateWrapper final { public: - explicit IsolateWrapper(CountersMode counters_mode); + IsolateWrapper(CountersMode counters_mode, + IsolateSharedMode shared_mode = kStandaloneIsolate, + v8::Isolate* shared_isolate_if_client = nullptr); + ~IsolateWrapper(); IsolateWrapper(const IsolateWrapper&) = delete; IsolateWrapper& operator=(const IsolateWrapper&) = delete; @@ -78,10 +86,11 @@ class IsolateWrapper final { // // A set of mixins from which the test fixtures will be constructed. 
// -template +template class WithIsolateMixin : public TMixin { public: - WithIsolateMixin() : isolate_wrapper_(kCountersMode) {} + WithIsolateMixin() : isolate_wrapper_(kCountersMode, kSharedMode) {} v8::Isolate* v8_isolate() const { return isolate_wrapper_.isolate(); } @@ -386,6 +395,11 @@ using TestWithNativeContextAndZone = // WithDefaultPlatformMixin< // ::testing::Test>>>>>>; +using TestWithSharedIsolate = // + WithIsolateMixin< // + WithDefaultPlatformMixin<::testing::Test>, // + kNoCounters, kSharedIsolate>; + class V8_NODISCARD SaveFlags { public: SaveFlags(); diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 5c43a7bfbb..993785ec03 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -229,48 +229,49 @@ INSTANCE_TYPES = { 2104: "JS_ARRAY_TYPE", 2105: "JS_ARRAY_ITERATOR_TYPE", 2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", - 2107: "JS_COLLATOR_TYPE", - 2108: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", - 2109: "JS_DATE_TYPE", - 2110: "JS_DATE_TIME_FORMAT_TYPE", - 2111: "JS_DISPLAY_NAMES_TYPE", - 2112: "JS_ERROR_TYPE", - 2113: "JS_EXTERNAL_OBJECT_TYPE", - 2114: "JS_FINALIZATION_REGISTRY_TYPE", - 2115: "JS_LIST_FORMAT_TYPE", - 2116: "JS_LOCALE_TYPE", - 2117: "JS_MESSAGE_OBJECT_TYPE", - 2118: "JS_NUMBER_FORMAT_TYPE", - 2119: "JS_PLURAL_RULES_TYPE", - 2120: "JS_REG_EXP_TYPE", - 2121: "JS_REG_EXP_STRING_ITERATOR_TYPE", - 2122: "JS_RELATIVE_TIME_FORMAT_TYPE", - 2123: "JS_SEGMENT_ITERATOR_TYPE", - 2124: "JS_SEGMENTER_TYPE", - 2125: "JS_SEGMENTS_TYPE", - 2126: "JS_SHADOW_REALM_TYPE", - 2127: "JS_SHARED_STRUCT_TYPE", - 2128: "JS_STRING_ITERATOR_TYPE", - 2129: "JS_TEMPORAL_CALENDAR_TYPE", - 2130: "JS_TEMPORAL_DURATION_TYPE", - 2131: "JS_TEMPORAL_INSTANT_TYPE", - 2132: "JS_TEMPORAL_PLAIN_DATE_TYPE", - 2133: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE", - 2134: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE", - 2135: "JS_TEMPORAL_PLAIN_TIME_TYPE", - 2136: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE", - 2137: "JS_TEMPORAL_TIME_ZONE_TYPE", - 2138: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE", - 2139: 
"JS_V8_BREAK_ITERATOR_TYPE", - 2140: "JS_WEAK_REF_TYPE", - 2141: "WASM_GLOBAL_OBJECT_TYPE", - 2142: "WASM_INSTANCE_OBJECT_TYPE", - 2143: "WASM_MEMORY_OBJECT_TYPE", - 2144: "WASM_MODULE_OBJECT_TYPE", - 2145: "WASM_SUSPENDER_OBJECT_TYPE", - 2146: "WASM_TABLE_OBJECT_TYPE", - 2147: "WASM_TAG_OBJECT_TYPE", - 2148: "WASM_VALUE_OBJECT_TYPE", + 2107: "JS_ATOMICS_MUTEX_TYPE", + 2108: "JS_COLLATOR_TYPE", + 2109: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 2110: "JS_DATE_TYPE", + 2111: "JS_DATE_TIME_FORMAT_TYPE", + 2112: "JS_DISPLAY_NAMES_TYPE", + 2113: "JS_ERROR_TYPE", + 2114: "JS_EXTERNAL_OBJECT_TYPE", + 2115: "JS_FINALIZATION_REGISTRY_TYPE", + 2116: "JS_LIST_FORMAT_TYPE", + 2117: "JS_LOCALE_TYPE", + 2118: "JS_MESSAGE_OBJECT_TYPE", + 2119: "JS_NUMBER_FORMAT_TYPE", + 2120: "JS_PLURAL_RULES_TYPE", + 2121: "JS_REG_EXP_TYPE", + 2122: "JS_REG_EXP_STRING_ITERATOR_TYPE", + 2123: "JS_RELATIVE_TIME_FORMAT_TYPE", + 2124: "JS_SEGMENT_ITERATOR_TYPE", + 2125: "JS_SEGMENTER_TYPE", + 2126: "JS_SEGMENTS_TYPE", + 2127: "JS_SHADOW_REALM_TYPE", + 2128: "JS_SHARED_STRUCT_TYPE", + 2129: "JS_STRING_ITERATOR_TYPE", + 2130: "JS_TEMPORAL_CALENDAR_TYPE", + 2131: "JS_TEMPORAL_DURATION_TYPE", + 2132: "JS_TEMPORAL_INSTANT_TYPE", + 2133: "JS_TEMPORAL_PLAIN_DATE_TYPE", + 2134: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE", + 2135: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE", + 2136: "JS_TEMPORAL_PLAIN_TIME_TYPE", + 2137: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE", + 2138: "JS_TEMPORAL_TIME_ZONE_TYPE", + 2139: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE", + 2140: "JS_V8_BREAK_ITERATOR_TYPE", + 2141: "JS_WEAK_REF_TYPE", + 2142: "WASM_GLOBAL_OBJECT_TYPE", + 2143: "WASM_INSTANCE_OBJECT_TYPE", + 2144: "WASM_MEMORY_OBJECT_TYPE", + 2145: "WASM_MODULE_OBJECT_TYPE", + 2146: "WASM_SUSPENDER_OBJECT_TYPE", + 2147: "WASM_TABLE_OBJECT_TYPE", + 2148: "WASM_TAG_OBJECT_TYPE", + 2149: "WASM_VALUE_OBJECT_TYPE", } # List of known V8 maps. 
@@ -449,8 +450,8 @@ KNOWN_MAPS = { ("read_only_space", 0x06a65): (138, "StoreHandler1Map"), ("read_only_space", 0x06a8d): (138, "StoreHandler2Map"), ("read_only_space", 0x06ab5): (138, "StoreHandler3Map"), - ("map_space", 0x02149): (2113, "ExternalMap"), - ("map_space", 0x02171): (2117, "JSMessageObjectMap"), + ("map_space", 0x02149): (2114, "ExternalMap"), + ("map_space", 0x02171): (2118, "JSMessageObjectMap"), } # List of known V8 objects.