From b1020a4345bae1be20a0b8a4ab2c8446c37a904c Mon Sep 17 00:00:00 2001 From: Shu-yu Guo Date: Mon, 8 Aug 2022 16:19:43 -0700 Subject: [PATCH] Reland "[shared-struct] Add Atomics.Condition" MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reland of commit e2066ff6bff8c09cac5146c2c6710c2ac61216c6 Changes since revert: - Rebased against c99185249186a18c43e935e7f32da898e6b321e2, which uses the external pointer table for the WaiterQueueNode stored in the state field when compressing pointers. This relaxes the alignment requirement of the state field to be 4-bytes when compressing pointers. - Moved the state field into the JSSynchronizationPrimitive base class, since alignment and padding can now be made simpler. Original change's description: > [shared-struct] Add Atomics.Condition > > Bug: v8:12547 > Change-Id: Id439aef9cab3348171a23378cdd47ede5f4d7288 > Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_rel_ng,v8_linux64_tsan_rel_ng > Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3630350 > Reviewed-by: Dominik Inführ > Reviewed-by: Adam Klein > Commit-Queue: Shu-yu Guo > Cr-Commit-Position: refs/heads/main@{#81734} Bug: v8:12547 Change-Id: I638304c3d5722c64bd04708ed4cf84863cdebb81 Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_rel_ng,v8_linux64_tsan_rel_ng Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3763787 Reviewed-by: Adam Klein Commit-Queue: Shu-yu Guo Reviewed-by: Dominik Inführ Cr-Commit-Position: refs/heads/main@{#82278} --- .../builtins-atomics-synchronization.cc | 103 +++++++- src/builtins/builtins-definitions.h | 3 + src/builtins/builtins-sharedarraybuffer.cc | 4 +- src/common/message-template.h | 6 +- src/compiler/types.cc | 1 + src/diagnostics/objects-debug.cc | 13 +- src/diagnostics/objects-printer.cc | 9 + src/heap/objects-visiting.h | 2 +- src/init/bootstrapper.cc | 25 +- src/objects/contexts.h | 1 + src/objects/js-atomics-synchronization-inl.h | 19 +- src/objects/js-atomics-synchronization.cc | 248 +++++++++++++++++- src/objects/js-atomics-synchronization.h | 106 ++++++-- src/objects/js-atomics-synchronization.tq | 7 +- src/objects/js-objects.cc | 2 + src/objects/map.cc | 8 +- src/objects/map.h | 2 +- src/objects/object-list-macros.h | 2 + src/objects/objects-body-descriptors-inl.h | 7 +- src/objects/objects-inl.h | 1 + src/objects/value-serializer.cc | 1 + src/runtime/runtime-test.cc | 8 + src/runtime/runtime-wasm.cc | 14 +- src/runtime/runtime.h | 3 +- .../shared-memory/condition-workers.js | 43 +++ test/mjsunit/shared-memory/condition.js | 36 +++ test/unittests/BUILD.gn | 2 +- ...ics-synchronization-primitive-unittest.cc} | 134 +++++++++- tools/v8heapconst.py | 107 ++++---- 29 files changed, 793 insertions(+), 124 deletions(-) create mode 100644 test/mjsunit/shared-memory/condition-workers.js create mode 100644 test/mjsunit/shared-memory/condition.js rename test/unittests/js-atomics/{js-atomics-mutex-unittest.cc => js-atomics-synchronization-primitive-unittest.cc} (51%) diff --git a/src/builtins/builtins-atomics-synchronization.cc b/src/builtins/builtins-atomics-synchronization.cc index 0ce55b5240..67e5672693 100644 --- a/src/builtins/builtins-atomics-synchronization.cc +++ b/src/builtins/builtins-atomics-synchronization.cc @@ -29,8 +29,8 @@ BUILTIN(AtomicsMutexLock) { Handle js_mutex = Handle::cast(js_mutex_obj); Handle run_under_lock = args.atOrUndefined(isolate, 2); if (!run_under_lock->IsCallable()) { - THROW_NEW_ERROR_RETURN_FAILURE(isolate, - 
NewTypeError(MessageTemplate::kNotCallable)); + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kNotCallable, run_under_lock)); } // Like Atomics.wait, synchronous locking may block, and so is disallowed on @@ -39,7 +39,9 @@ BUILTIN(AtomicsMutexLock) { // This is not a recursive lock, so also throw if recursively locking. if (!isolate->allow_atomics_wait() || js_mutex->IsCurrentThreadOwner()) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kAtomicsMutexLockNotAllowed)); + isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); } Handle result; @@ -69,8 +71,8 @@ BUILTIN(AtomicsMutexTryLock) { Handle js_mutex = Handle::cast(js_mutex_obj); Handle run_under_lock = args.atOrUndefined(isolate, 2); if (!run_under_lock->IsCallable()) { - THROW_NEW_ERROR_RETURN_FAILURE(isolate, - NewTypeError(MessageTemplate::kNotCallable)); + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kNotCallable, run_under_lock)); } JSAtomicsMutex::TryLockGuard try_lock_guard(isolate, js_mutex); @@ -86,5 +88,96 @@ BUILTIN(AtomicsMutexTryLock) { return ReadOnlyRoots(isolate).false_value(); } +BUILTIN(AtomicsConditionConstructor) { + DCHECK(FLAG_harmony_struct); + HandleScope scope(isolate); + return *JSAtomicsCondition::Create(isolate); +} + +BUILTIN(AtomicsConditionWait) { + DCHECK(FLAG_harmony_struct); + constexpr char method_name[] = "Atomics.Condition.wait"; + HandleScope scope(isolate); + + Handle js_condition_obj = args.atOrUndefined(isolate, 1); + Handle js_mutex_obj = args.atOrUndefined(isolate, 2); + Handle timeout_obj = args.atOrUndefined(isolate, 3); + if (!js_condition_obj->IsJSAtomicsCondition() || + !js_mutex_obj->IsJSAtomicsMutex()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); + } + + base::Optional timeout = base::nullopt; + if (!timeout_obj->IsUndefined(isolate)) { + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout_obj, + Object::ToNumber(isolate, timeout_obj)); + double ms = timeout_obj->Number(); + if (!std::isnan(ms)) { + if (ms < 0) ms = 0; + if (ms <= static_cast(std::numeric_limits::max())) { + timeout = base::TimeDelta::FromMilliseconds(static_cast(ms)); + } + } + } + + if (!isolate->allow_atomics_wait()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); + } + + Handle js_condition = + Handle::cast(js_condition_obj); + Handle js_mutex = Handle::cast(js_mutex_obj); + + if (!js_mutex->IsCurrentThreadOwner()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, + NewTypeError(MessageTemplate::kAtomicsMutexNotOwnedByCurrentThread)); + } + + return isolate->heap()->ToBoolean( + JSAtomicsCondition::WaitFor(isolate, js_condition, js_mutex, timeout)); +} + +BUILTIN(AtomicsConditionNotify) { + DCHECK(FLAG_harmony_struct); + constexpr char method_name[] = "Atomics.Condition.notify"; + HandleScope scope(isolate); + + Handle js_condition_obj = args.atOrUndefined(isolate, 1); + Handle count_obj = args.atOrUndefined(isolate, 2); + if (!js_condition_obj->IsJSAtomicsCondition()) { + THROW_NEW_ERROR_RETURN_FAILURE( + isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType, + isolate->factory()->NewStringFromAsciiChecked( + method_name))); + } + + uint32_t count; + if (count_obj->IsUndefined(isolate)) { + count = 
JSAtomicsCondition::kAllWaiters; + } else { + ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count_obj, + Object::ToInteger(isolate, count_obj)); + double count_double = count_obj->Number(); + if (count_double < 0) { + count_double = 0; + } else if (count_double > JSAtomicsCondition::kAllWaiters) { + count_double = JSAtomicsCondition::kAllWaiters; + } + count = static_cast(count_double); + } + + Handle js_condition = + Handle::cast(js_condition_obj); + return *isolate->factory()->NewNumberFromUint( + js_condition->Notify(isolate, count)); +} + } // namespace internal } // namespace v8 diff --git a/src/builtins/builtins-definitions.h b/src/builtins/builtins-definitions.h index 641838adbc..bdc166e9a1 100644 --- a/src/builtins/builtins-definitions.h +++ b/src/builtins/builtins-definitions.h @@ -1005,6 +1005,9 @@ namespace internal { CPP(AtomicsMutexConstructor) \ CPP(AtomicsMutexLock) \ CPP(AtomicsMutexTryLock) \ + CPP(AtomicsConditionConstructor) \ + CPP(AtomicsConditionWait) \ + CPP(AtomicsConditionNotify) \ \ /* AsyncGenerator */ \ \ diff --git a/src/builtins/builtins-sharedarraybuffer.cc b/src/builtins/builtins-sharedarraybuffer.cc index ee6c70b02a..ea2fba9bbe 100644 --- a/src/builtins/builtins-sharedarraybuffer.cc +++ b/src/builtins/builtins-sharedarraybuffer.cc @@ -231,7 +231,9 @@ Object DoWait(Isolate* isolate, FutexEmulation::WaitMode mode, if (mode == FutexEmulation::WaitMode::kSync && !isolate->allow_atomics_wait()) { THROW_NEW_ERROR_RETURN_FAILURE( - isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed)); + isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed, + isolate->factory()->NewStringFromAsciiChecked( + "Atomics.wait"))); } Handle array_buffer = sta->GetBuffer(); diff --git a/src/common/message-template.h b/src/common/message-template.h index b0f2c6628b..636b1b41cd 100644 --- a/src/common/message-template.h +++ b/src/common/message-template.h @@ -41,9 +41,9 @@ namespace internal { T(AwaitNotInDebugEvaluate, \ "await can not be used when evaluating code " \ "while paused in the debugger") \ - T(AtomicsMutexLockNotAllowed, \ - "Atomics.Mutex.lock cannot be called in this context") \ - T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \ + T(AtomicsMutexNotOwnedByCurrentThread, \ + "Atomics.Mutex is not owned by the current agent") \ + T(AtomicsOperationNotAllowed, "% cannot be called in this context") \ T(BadRoundingType, "RoundingType is not fractionDigits") \ T(BadSortComparisonFunction, \ "The comparison function must be either a function or undefined") \ diff --git a/src/compiler/types.cc b/src/compiler/types.cc index 82fedec2cb..676af8d54c 100644 --- a/src/compiler/types.cc +++ b/src/compiler/types.cc @@ -264,6 +264,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) { case JS_SHADOW_REALM_TYPE: case JS_SHARED_ARRAY_TYPE: case JS_SHARED_STRUCT_TYPE: + case JS_ATOMICS_CONDITION_TYPE: case JS_ATOMICS_MUTEX_TYPE: case JS_TEMPORAL_CALENDAR_TYPE: case JS_TEMPORAL_DURATION_TYPE: diff --git a/src/diagnostics/objects-debug.cc b/src/diagnostics/objects-debug.cc index 2e634da978..1c7e4706d0 100644 --- a/src/diagnostics/objects-debug.cc +++ b/src/diagnostics/objects-debug.cc @@ -551,7 +551,8 @@ void Map::MapVerify(Isolate* isolate) { JSObject::GetEmbedderFieldCount(*this) * kEmbedderDataSlotSize, inobject_fields_start_offset); - if (IsJSSharedStructMap() || IsJSSharedArrayMap()) { + if (IsJSSharedStructMap() || IsJSSharedArrayMap() || IsJSAtomicsMutex() || + IsJSAtomicsCondition()) { CHECK(InSharedHeap()); 
CHECK(GetBackPointer().IsUndefined(isolate)); Object maybe_cell = prototype_validity_cell(); @@ -1265,10 +1266,12 @@ void JSAtomicsMutex::JSAtomicsMutexVerify(Isolate* isolate) { CHECK(IsJSAtomicsMutex()); CHECK(InSharedWritableHeap()); JSObjectVerify(isolate); - Map mutex_map = map(); - CHECK(mutex_map.GetBackPointer().IsUndefined(isolate)); - CHECK(!mutex_map.is_extensible()); - CHECK(!mutex_map.is_prototype_map()); +} + +void JSAtomicsCondition::JSAtomicsConditionVerify(Isolate* isolate) { + CHECK(IsJSAtomicsCondition()); + CHECK(InSharedHeap()); + JSObjectVerify(isolate); } void JSSharedArray::JSSharedArrayVerify(Isolate* isolate) { diff --git a/src/diagnostics/objects-printer.cc b/src/diagnostics/objects-printer.cc index e3548d4b6c..55ede13f47 100644 --- a/src/diagnostics/objects-printer.cc +++ b/src/diagnostics/objects-printer.cc @@ -1495,6 +1495,15 @@ void JSAtomicsMutex::JSAtomicsMutexPrint(std::ostream& os) { JSObjectPrintBody(os, *this); } +void JSAtomicsCondition::JSAtomicsConditionPrint(std::ostream& os) { + JSObjectPrintHeader(os, *this, "JSAtomicsCondition"); + Isolate* isolate = GetIsolateFromWritableObject(*this); + os << "\n - isolate: " << isolate; + if (isolate->is_shared()) os << " (shared)"; + os << "\n - state: " << this->state(); + JSObjectPrintBody(os, *this); +} + void JSWeakMap::JSWeakMapPrint(std::ostream& os) { JSObjectPrintHeader(os, *this, "JSWeakMap"); os << "\n - table: " << Brief(table()); diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h index 5c2aba2b36..c1d73fb8d9 100644 --- a/src/heap/objects-visiting.h +++ b/src/heap/objects-visiting.h @@ -31,12 +31,12 @@ namespace internal { V(FeedbackMetadata) \ V(FixedDoubleArray) \ V(JSArrayBuffer) \ - V(JSAtomicsMutex) \ V(JSDataView) \ V(JSExternalObject) \ V(JSFinalizationRegistry) \ V(JSFunction) \ V(JSObject) \ + V(JSSynchronizationPrimitive) \ V(JSTypedArray) \ V(WeakCell) \ V(JSWeakCollection) \ diff --git a/src/init/bootstrapper.cc b/src/init/bootstrapper.cc index cac51f4fcc..1c35ab0375 100644 --- a/src/init/bootstrapper.cc +++ b/src/init/bootstrapper.cc @@ -4663,8 +4663,10 @@ void Genesis::InitializeGlobal_harmony_struct() { DONT_ENUM); } + // TODO(v8:12547): Make a single canonical copy of the Mutex and Condition + // maps. + { // Atomics.Mutex - // TODO(syg): Make a single canonical copy of the map. 
Handle mutex_str = isolate()->factory()->InternalizeUtf8String("Mutex"); Handle mutex_fun = CreateSharedObjectConstructor( @@ -4683,6 +4685,27 @@ void Genesis::InitializeGlobal_harmony_struct() { SimpleInstallFunction(isolate(), mutex_fun, "tryLock", Builtin::kAtomicsMutexTryLock, 2, true); } + + { // Atomics.Condition + Handle condition_str = + isolate()->factory()->InternalizeUtf8String("Condition"); + Handle condition_fun = CreateSharedObjectConstructor( + isolate(), condition_str, JS_ATOMICS_CONDITION_TYPE, + JSAtomicsCondition::kHeaderSize, TERMINAL_FAST_ELEMENTS_KIND, + Builtin::kAtomicsConditionConstructor); + condition_fun->shared().set_internal_formal_parameter_count( + JSParameterCount(0)); + condition_fun->shared().set_length(0); + native_context()->set_js_atomics_condition_map( + condition_fun->initial_map()); + JSObject::AddProperty(isolate(), isolate()->atomics_object(), condition_str, + condition_fun, DONT_ENUM); + + SimpleInstallFunction(isolate(), condition_fun, "wait", + Builtin::kAtomicsConditionWait, 2, false); + SimpleInstallFunction(isolate(), condition_fun, "notify", + Builtin::kAtomicsConditionNotify, 2, false); + } } void Genesis::InitializeGlobal_harmony_array_find_last() { diff --git a/src/objects/contexts.h b/src/objects/contexts.h index d683cc1f5b..c485ecae46 100644 --- a/src/objects/contexts.h +++ b/src/objects/contexts.h @@ -178,6 +178,7 @@ enum ContextLookupFlags { js_array_packed_double_elements_map) \ V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \ js_array_holey_double_elements_map) \ + V(JS_ATOMICS_CONDITION_MAP, Map, js_atomics_condition_map) \ V(JS_ATOMICS_MUTEX_MAP, Map, js_atomics_mutex_map) \ V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \ V(JS_MAP_MAP_INDEX, Map, js_map_map) \ diff --git a/src/objects/js-atomics-synchronization-inl.h b/src/objects/js-atomics-synchronization-inl.h index accfa49cb5..07710e8d8d 100644 --- a/src/objects/js-atomics-synchronization-inl.h +++ b/src/objects/js-atomics-synchronization-inl.h @@ -19,6 +19,15 @@ namespace internal { #include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc" +TQ_OBJECT_CONSTRUCTORS_IMPL(JSSynchronizationPrimitive) + +std::atomic* +JSSynchronizationPrimitive::AtomicStatePtr() { + StateT* state_ptr = reinterpret_cast(field_address(kStateOffset)); + DCHECK(IsAligned(reinterpret_cast(state_ptr), sizeof(StateT))); + return base::AsAtomicPtr(state_ptr); +} + TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex) CAST_ACCESSOR(JSAtomicsMutex) @@ -111,18 +120,16 @@ void JSAtomicsMutex::ClearOwnerThread() { std::memory_order_relaxed); } -std::atomic* JSAtomicsMutex::AtomicStatePtr() { - StateT* state_ptr = reinterpret_cast(field_address(kStateOffset)); - DCHECK(IsAligned(reinterpret_cast(state_ptr), sizeof(StateT))); - return base::AsAtomicPtr(state_ptr); -} - std::atomic* JSAtomicsMutex::AtomicOwnerThreadIdPtr() { int32_t* owner_thread_id_ptr = reinterpret_cast(field_address(kOwnerThreadIdOffset)); return base::AsAtomicPtr(owner_thread_id_ptr); } +TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsCondition) + +CAST_ACCESSOR(JSAtomicsCondition) + } // namespace internal } // namespace v8 diff --git a/src/objects/js-atomics-synchronization.cc b/src/objects/js-atomics-synchronization.cc index 110fe09c82..5ea3e21c30 100644 --- a/src/objects/js-atomics-synchronization.cc +++ b/src/objects/js-atomics-synchronization.cc @@ -7,6 +7,7 @@ #include "src/base/macros.h" #include "src/base/platform/condition-variable.h" #include "src/base/platform/mutex.h" +#include "src/base/platform/time.h" #include 
"src/base/platform/yield-processor.h" #include "src/execution/isolate-inl.h" #include "src/heap/parked-scope.h" @@ -51,7 +52,7 @@ class V8_NODISCARD WaiterQueueNode final { static_cast(state & T::kWaiterQueueHeadMask); if (handle == 0) return nullptr; // The external pointer is cleared after decoding to prevent reuse by - // multiple mutexes in case of heap corruption. + // multiple synchronization primitives in case of heap corruption. return reinterpret_cast( requester->shared_external_pointer_table().Exchange( handle, kNullAddress, kWaiterQueueNodeTag)); @@ -63,6 +64,7 @@ class V8_NODISCARD WaiterQueueNode final { // Enqueues {new_tail}, mutating {head} to be the new head. static void Enqueue(WaiterQueueNode** head, WaiterQueueNode* new_tail) { DCHECK_NOT_NULL(head); + new_tail->VerifyNotInList(); WaiterQueueNode* current_head = *head; if (current_head == nullptr) { new_tail->next_ = new_tail; @@ -77,8 +79,7 @@ class V8_NODISCARD WaiterQueueNode final { } } - // Dequeues a waiter and returns it; {head} is mutated to be the new - // head. + // Dequeues a waiter and returns it; mutating {head} to be the new head. static WaiterQueueNode* Dequeue(WaiterQueueNode** head) { DCHECK_NOT_NULL(head); DCHECK_NOT_NULL(*head); @@ -92,9 +93,54 @@ class V8_NODISCARD WaiterQueueNode final { tail->next_ = new_head; *head = new_head; } + current_head->SetNotInListForVerification(); return current_head; } + // Splits at most {count} nodes of the waiter list of into its own list and + // returns it, mutating {head} to be the head of the back list. + static WaiterQueueNode* Split(WaiterQueueNode** head, uint32_t count) { + DCHECK_GT(count, 0); + DCHECK_NOT_NULL(head); + DCHECK_NOT_NULL(*head); + WaiterQueueNode* front_head = *head; + WaiterQueueNode* back_head = front_head; + uint32_t actual_count = 0; + while (actual_count < count) { + back_head = back_head->next_; + // The queue is shorter than the requested count, return the whole queue. + if (back_head == front_head) { + *head = nullptr; + return front_head; + } + actual_count++; + } + WaiterQueueNode* front_tail = back_head->prev_; + WaiterQueueNode* back_tail = front_head->prev_; + + // Fix up the back list (i.e. remainder of the list). + back_head->prev_ = back_tail; + back_tail->next_ = back_head; + *head = back_head; + + // Fix up and return the front list (i.e. the dequeued list). + front_head->prev_ = front_tail; + front_tail->next_ = front_head; + return front_head; + } + + // This method must be called from a known waiter queue head. Incorrectly + // encoded lists can cause this method to infinitely loop. + static int LengthFromHead(WaiterQueueNode* head) { + WaiterQueueNode* cur = head; + int len = 0; + do { + len++; + cur = cur->next_; + } while (cur != head); + return len; + } + void Wait(Isolate* requester) { AllowGarbageCollection allow_before_parking; ParkedScope parked_scope(requester->main_thread_local_heap()); @@ -104,15 +150,57 @@ class V8_NODISCARD WaiterQueueNode final { } } + // Returns false if timed out, true otherwise. 
+ bool WaitFor(Isolate* requester, const base::TimeDelta& rel_time) { + AllowGarbageCollection allow_before_parking; + ParkedScope parked_scope(requester->main_thread_local_heap()); + base::MutexGuard guard(&wait_lock_); + base::TimeTicks current_time = base::TimeTicks::Now(); + base::TimeTicks timeout_time = current_time + rel_time; + for (;;) { + if (!should_wait) return true; + current_time = base::TimeTicks::Now(); + if (current_time >= timeout_time) return false; + base::TimeDelta time_until_timeout = timeout_time - current_time; + bool wait_res = wait_cond_var_.WaitFor(&wait_lock_, time_until_timeout); + USE(wait_res); + // The wake up may have been spurious, so loop again. + } + } + void Notify() { base::MutexGuard guard(&wait_lock_); should_wait = false; wait_cond_var_.NotifyOne(); + SetNotInListForVerification(); + } + + uint32_t NotifyAllInList() { + WaiterQueueNode* cur = this; + uint32_t count = 0; + do { + WaiterQueueNode* next = cur->next_; + cur->Notify(); + cur = next; + count++; + } while (cur != this); + return count; } bool should_wait = false; private: + void VerifyNotInList() { + DCHECK_NULL(next_); + DCHECK_NULL(prev_); + } + + void SetNotInListForVerification() { +#ifdef DEBUG + next_ = prev_ = nullptr; +#endif + } + // The queue wraps around, e.g. the head's prev is the tail, and the tail's // next is the head. WaiterQueueNode* next_ = nullptr; @@ -187,7 +275,7 @@ void JSAtomicsMutex::LockSlowPath(Isolate* requester, // put the requester thread on the waiter queue. // Allocate a waiter queue node on-stack, since this thread is going to - // sleep and will be blocked anyaway. + // sleep and will be blocked anyway. WaiterQueueNode this_waiter; { @@ -267,5 +355,157 @@ void JSAtomicsMutex::UnlockSlowPath(Isolate* requester, old_head->Notify(); } +// static +Handle JSAtomicsCondition::Create(Isolate* isolate) { + auto* factory = isolate->factory(); + Handle map = isolate->js_atomics_condition_map(); + Handle cond = Handle::cast( + factory->NewJSObjectFromMap(map, AllocationType::kSharedOld)); + cond->set_state(kEmptyState); + return cond; +} + +bool JSAtomicsCondition::TryLockWaiterQueueExplicit(std::atomic* state, + StateT& expected) { + // Try to acquire the queue lock. + expected &= ~kIsWaiterQueueLockedBit; + return state->compare_exchange_weak( + expected, expected | kIsWaiterQueueLockedBit, std::memory_order_acquire, + std::memory_order_relaxed); +} + +// static +bool JSAtomicsCondition::WaitFor(Isolate* requester, + Handle cv, + Handle mutex, + base::Optional timeout) { + DisallowGarbageCollection no_gc; + + // Allocate a waiter queue node on-stack, since this thread is going to sleep + // and will be blocked anyway. + WaiterQueueNode this_waiter; + + { + // The state pointer should not be used outside of this block as a shared GC + // may reallocate it after waiting. + std::atomic* state = cv->AtomicStatePtr(); + + // Try to acquire the queue lock, which is itself a spinlock. + StateT current_state = state->load(std::memory_order_relaxed); + while (!cv->TryLockWaiterQueueExplicit(state, current_state)) { + YIELD_PROCESSOR; + } + + // With the queue lock held, enqueue the requester onto the waiter queue. + this_waiter.should_wait = true; + WaiterQueueNode* waiter_head = + WaiterQueueNode::DestructivelyDecodeHead( + requester, current_state); + WaiterQueueNode::Enqueue(&waiter_head, &this_waiter); + + // Release the queue lock and install the new waiter queue head by creating + // a new state. 
+ DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit); + StateT new_state = + WaiterQueueNode::EncodeHead(requester, waiter_head); + state->store(new_state, std::memory_order_release); + } + + // Release the mutex and wait for another thread to wake us up, reacquiring + // the mutex upon wakeup. + mutex->Unlock(requester); + bool rv; + if (timeout) { + rv = this_waiter.WaitFor(requester, *timeout); + } else { + this_waiter.Wait(requester); + rv = true; + } + JSAtomicsMutex::Lock(requester, mutex); + return rv; +} + +uint32_t JSAtomicsCondition::Notify(Isolate* requester, uint32_t count) { + std::atomic* state = AtomicStatePtr(); + + // To wake a sleeping thread, first acquire the queue lock, which is itself a + // spinlock. + StateT current_state = state->load(std::memory_order_relaxed); + // There are no waiters. + if (current_state == kEmptyState) return 0; + while (!TryLockWaiterQueueExplicit(state, current_state)) { + YIELD_PROCESSOR; + } + + // Get the waiter queue head. + WaiterQueueNode* waiter_head = + WaiterQueueNode::DestructivelyDecodeHead( + requester, current_state); + + // There's no waiter to wake up, release the queue lock by setting it to the + // empty state. + if (waiter_head == nullptr) { + DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit); + state->store(kEmptyState, std::memory_order_release); + return 0; + } + + WaiterQueueNode* old_head; + if (count == 1) { + old_head = WaiterQueueNode::Dequeue(&waiter_head); + } else if (count == kAllWaiters) { + old_head = waiter_head; + waiter_head = nullptr; + } else { + old_head = WaiterQueueNode::Split(&waiter_head, count); + } + + // Release the queue lock and install the new waiter queue head by creating a + // new state. + DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit); + StateT new_state = + WaiterQueueNode::EncodeHead(requester, waiter_head); + state->store(new_state, std::memory_order_release); + + // Notify the waiters. + if (count == 1) { + old_head->Notify(); + return 1; + } + return old_head->NotifyAllInList(); +} + +Object JSAtomicsCondition::NumWaitersForTesting(Isolate* isolate) { + DisallowGarbageCollection no_gc; + std::atomic* state = AtomicStatePtr(); + StateT current_state = state->load(std::memory_order_relaxed); + + // There are no waiters. + if (current_state == kEmptyState) return Smi::FromInt(0); + + int num_waiters; + { + // Take the queue lock. + while (!TryLockWaiterQueueExplicit(state, current_state)) { + YIELD_PROCESSOR; + } + + // Get the waiter queue head. + WaiterQueueNode* waiter_head = + WaiterQueueNode::DestructivelyDecodeHead( + isolate, current_state); + num_waiters = WaiterQueueNode::LengthFromHead(waiter_head); + + // Release the queue lock and reinstall the same queue head by creating a + // new state. 
+ DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit); + StateT new_state = + WaiterQueueNode::EncodeHead(isolate, waiter_head); + state->store(new_state, std::memory_order_release); + } + + return Smi::FromInt(num_waiters); +} + } // namespace internal } // namespace v8 diff --git a/src/objects/js-atomics-synchronization.h b/src/objects/js-atomics-synchronization.h index b89738c122..a656cb6e56 100644 --- a/src/objects/js-atomics-synchronization.h +++ b/src/objects/js-atomics-synchronization.h @@ -7,6 +7,7 @@ #include +#include "src/base/platform/time.h" #include "src/execution/thread-id.h" #include "src/objects/js-objects.h" @@ -22,12 +23,38 @@ namespace detail { class WaiterQueueNode; } // namespace detail +// Base class for JSAtomicsMutex and JSAtomicsCondition +class JSSynchronizationPrimitive + : public TorqueGeneratedJSSynchronizationPrimitive< + JSSynchronizationPrimitive, JSObject> { + public: + // Synchronization only store raw data as state. + static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize; + class BodyDescriptor; + + TQ_OBJECT_CONSTRUCTORS(JSSynchronizationPrimitive) + + protected: +#ifdef V8_COMPRESS_POINTERS + using StateT = uint32_t; + static_assert(sizeof(StateT) == sizeof(ExternalPointerHandle)); +#else + using StateT = uintptr_t; +#endif // V8_COMPRESS_POINTERS + + inline std::atomic* AtomicStatePtr(); + + using TorqueGeneratedJSSynchronizationPrimitive::state; + using TorqueGeneratedJSSynchronizationPrimitive::set_state; +}; + // A non-recursive mutex that is exposed to JS. // // It has the following properties: -// - Slim: 8-12 bytes. Lock state is 4 bytes when -// V8_SANDBOXED_EXTERNAL_POINTERS, and sizeof(void*) otherwise. Owner -// thread is an additional 4 bytes. +// - Slim: 8-12 bytes. Lock state is 4 bytes when V8_COMPRESS_POINTERS, and +// sizeof(void*) otherwise. Owner thread is an additional 4 bytes. // - Fast when uncontended: a single weak CAS. // - Possibly unfair under contention. // - Moving GC safe. It uses an index into the shared Isolate's external @@ -41,7 +68,8 @@ class WaiterQueueNode; // it implements a futex in userland. The algorithm is inspired by WebKit's // ParkingLot. class JSAtomicsMutex - : public TorqueGeneratedJSAtomicsMutex { + : public TorqueGeneratedJSAtomicsMutex { public: // A non-copyable wrapper class that provides an RAII-style mechanism for // owning the JSAtomicsMutex. 
@@ -96,9 +124,6 @@ class JSAtomicsMutex inline bool IsHeld(); inline bool IsCurrentThreadOwner(); - static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize; - class BodyDescriptor; - TQ_OBJECT_CONSTRUCTORS(JSAtomicsMutex) private: @@ -110,13 +135,6 @@ class JSAtomicsMutex static constexpr int kIsWaiterQueueLockedBit = 1 << 1; static constexpr int kLockBitsSize = 2; -#ifdef V8_COMPRESS_POINTERS - using StateT = uint32_t; - static_assert(sizeof(StateT) == sizeof(ExternalPointerHandle)); -#else - using StateT = uintptr_t; -#endif - static constexpr StateT kUnlocked = 0; static constexpr StateT kLockedUncontended = 1; @@ -126,7 +144,6 @@ class JSAtomicsMutex inline void SetCurrentThreadAsOwner(); inline void ClearOwnerThread(); - inline std::atomic* AtomicStatePtr(); inline std::atomic* AtomicOwnerThreadIdPtr(); bool TryLockExplicit(std::atomic* state, StateT& expected); @@ -138,12 +155,59 @@ class JSAtomicsMutex V8_EXPORT_PRIVATE void UnlockSlowPath(Isolate* requester, std::atomic* state); - using TorqueGeneratedJSAtomicsMutex::state; - using TorqueGeneratedJSAtomicsMutex::set_state; - using TorqueGeneratedJSAtomicsMutex::owner_thread_id; - using TorqueGeneratedJSAtomicsMutex::set_owner_thread_id; + using TorqueGeneratedJSAtomicsMutex< + JSAtomicsMutex, JSSynchronizationPrimitive>::owner_thread_id; + using TorqueGeneratedJSAtomicsMutex< + JSAtomicsMutex, JSSynchronizationPrimitive>::set_owner_thread_id; +}; + +// A condition variable that is exposed to JS. +// +// It has the following properties: +// - Slim: 4-8 bytes. Lock state is 4 bytes when V8_COMPRESS_POINTERS, and +// sizeof(void*) otherwise. +// - Moving GC safe. It uses an index into the shared Isolate's external +// pointer table to store a queue of sleeping threads. +// - Parks the main thread LocalHeap when waiting. Unparks the main thread +// LocalHeap after waking up. +// +// This condition variable manages its own queue of waiting threads, like +// JSAtomicsMutex. The algorithm is inspired by WebKit's ParkingLot. +class JSAtomicsCondition + : public TorqueGeneratedJSAtomicsCondition { + public: + DECL_CAST(JSAtomicsCondition) + DECL_PRINTER(JSAtomicsCondition) + EXPORT_DECL_VERIFIER(JSAtomicsCondition) + + V8_EXPORT_PRIVATE static Handle Create(Isolate* isolate); + + V8_EXPORT_PRIVATE static bool WaitFor( + Isolate* requester, Handle cv, + Handle mutex, base::Optional timeout); + + static constexpr uint32_t kAllWaiters = UINT32_MAX; + + // Notify {count} waiters. Returns the number of waiters woken up. + V8_EXPORT_PRIVATE uint32_t Notify(Isolate* requester, uint32_t count); + + Object NumWaitersForTesting(Isolate* isolate); + + TQ_OBJECT_CONSTRUCTORS(JSAtomicsCondition) + + private: + friend class detail::WaiterQueueNode; + + // There is 1 lock bit: whether the waiter queue is locked. + static constexpr int kIsWaiterQueueLockedBit = 1 << 0; + static constexpr int kLockBitsSize = 1; + + static constexpr StateT kEmptyState = 0; + static constexpr StateT kLockBitsMask = (1 << kLockBitsSize) - 1; + static constexpr StateT kWaiterQueueHeadMask = ~kLockBitsMask; + + bool TryLockWaiterQueueExplicit(std::atomic* state, StateT& expected); }; } // namespace internal diff --git a/src/objects/js-atomics-synchronization.tq b/src/objects/js-atomics-synchronization.tq index 003e42c31e..791020f0f6 100644 --- a/src/objects/js-atomics-synchronization.tq +++ b/src/objects/js-atomics-synchronization.tq @@ -2,12 +2,17 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. 
-extern class JSAtomicsMutex extends JSObject { +@abstract +extern class JSSynchronizationPrimitive extends JSObject { @if(TAGGED_SIZE_8_BYTES) state: uintptr; @ifnot(TAGGED_SIZE_8_BYTES) state: uint32; +} +extern class JSAtomicsMutex extends JSSynchronizationPrimitive { owner_thread_id: int32; @if(TAGGED_SIZE_8_BYTES) optional_padding: uint32; @ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void; } + +extern class JSAtomicsCondition extends JSSynchronizationPrimitive {} diff --git a/src/objects/js-objects.cc b/src/objects/js-objects.cc index 902e350b45..d7bde08dbc 100644 --- a/src/objects/js-objects.cc +++ b/src/objects/js-objects.cc @@ -2474,6 +2474,8 @@ int JSObject::GetHeaderSize(InstanceType type, return JSSharedStruct::kHeaderSize; case JS_ATOMICS_MUTEX_TYPE: return JSAtomicsMutex::kHeaderSize; + case JS_ATOMICS_CONDITION_TYPE: + return JSAtomicsCondition::kHeaderSize; case JS_TEMPORAL_CALENDAR_TYPE: return JSTemporalCalendar::kHeaderSize; case JS_TEMPORAL_DURATION_TYPE: diff --git a/src/objects/map.cc b/src/objects/map.cc index c7f6f8d695..d92b84bb68 100644 --- a/src/objects/map.cc +++ b/src/objects/map.cc @@ -324,6 +324,7 @@ VisitorId Map::GetVisitorId(Map map) { #endif // V8_ENABLE_WEBASSEMBLY case JS_BOUND_FUNCTION_TYPE: case JS_WRAPPED_FUNCTION_TYPE: { + // Is GetEmbedderFieldCount(map) > 0 for Atomics.Mutex? const bool has_raw_data_fields = COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0; return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast; @@ -337,15 +338,16 @@ VisitorId Map::GetVisitorId(Map map) { case JS_WEAK_REF_TYPE: return kVisitJSWeakRef; - case JS_ATOMICS_MUTEX_TYPE: - return kVisitJSAtomicsMutex; - case WEAK_CELL_TYPE: return kVisitWeakCell; case JS_FINALIZATION_REGISTRY_TYPE: return kVisitJSFinalizationRegistry; + case JS_ATOMICS_MUTEX_TYPE: + case JS_ATOMICS_CONDITION_TYPE: + return kVisitJSSynchronizationPrimitive; + case FILLER_TYPE: case FOREIGN_TYPE: case HEAP_NUMBER_TYPE: diff --git a/src/objects/map.h b/src/objects/map.h index cbab6fa4d5..6f4d67f2fe 100644 --- a/src/objects/map.h +++ b/src/objects/map.h @@ -47,13 +47,13 @@ enum InstanceType : uint16_t; V(FreeSpace) \ V(JSApiObject) \ V(JSArrayBuffer) \ - V(JSAtomicsMutex) \ V(JSDataView) \ V(JSExternalObject) \ V(JSFinalizationRegistry) \ V(JSFunction) \ V(JSObject) \ V(JSObjectFast) \ + V(JSSynchronizationPrimitive) \ V(JSTypedArray) \ V(JSWeakRef) \ V(JSWeakCollection) \ diff --git a/src/objects/object-list-macros.h b/src/objects/object-list-macros.h index a7bc1c43ef..183c414f2e 100644 --- a/src/objects/object-list-macros.h +++ b/src/objects/object-list-macros.h @@ -132,6 +132,7 @@ class ZoneForwardList; V(JSAsyncFromSyncIterator) \ V(JSAsyncFunctionObject) \ V(JSAsyncGeneratorObject) \ + V(JSAtomicsCondition) \ V(JSAtomicsMutex) \ V(JSBoundFunction) \ V(JSCollection) \ @@ -167,6 +168,7 @@ class ZoneForwardList; V(JSSharedStruct) \ V(JSSpecialObject) \ V(JSStringIterator) \ + V(JSSynchronizationPrimitive) \ V(JSTemporalCalendar) \ V(JSTemporalDuration) \ V(JSTemporalInstant) \ diff --git a/src/objects/objects-body-descriptors-inl.h b/src/objects/objects-body-descriptors-inl.h index 117d9011ab..25c885e6e0 100644 --- a/src/objects/objects-body-descriptors-inl.h +++ b/src/objects/objects-body-descriptors-inl.h @@ -668,7 +668,8 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase { } }; -class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase { +class JSSynchronizationPrimitive::BodyDescriptor final + : public BodyDescriptorBase { public: 
static bool IsValidSlot(Map map, HeapObject obj, int offset) { if (offset < kEndOfTaggedFieldsOffset) return true; @@ -680,7 +681,6 @@ class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase { static inline void IterateBody(Map map, HeapObject obj, int object_size, ObjectVisitor* v) { IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v); - IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v); } static inline int SizeOf(Map map, HeapObject object) { @@ -1309,7 +1309,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) { case JS_PROXY_TYPE: return CALL_APPLY(JSProxy); case JS_ATOMICS_MUTEX_TYPE: - return CALL_APPLY(JSAtomicsMutex); + case JS_ATOMICS_CONDITION_TYPE: + return CALL_APPLY(JSSynchronizationPrimitive); case FOREIGN_TYPE: return CALL_APPLY(Foreign); case MAP_TYPE: diff --git a/src/objects/objects-inl.h b/src/objects/objects-inl.h index 69b82f8a19..23b1cbe612 100644 --- a/src/objects/objects-inl.h +++ b/src/objects/objects-inl.h @@ -1188,6 +1188,7 @@ bool Object::IsShared() const { case JS_SHARED_ARRAY_TYPE: case JS_SHARED_STRUCT_TYPE: case JS_ATOMICS_MUTEX_TYPE: + case JS_ATOMICS_CONDITION_TYPE: DCHECK(object.InSharedHeap()); return true; case INTERNALIZED_STRING_TYPE: diff --git a/src/objects/value-serializer.cc b/src/objects/value-serializer.cc index e7788029a3..13308440e1 100644 --- a/src/objects/value-serializer.cc +++ b/src/objects/value-serializer.cc @@ -615,6 +615,7 @@ Maybe ValueSerializer::WriteJSReceiver(Handle receiver) { case JS_SHARED_STRUCT_TYPE: return WriteJSSharedStruct(Handle::cast(receiver)); case JS_ATOMICS_MUTEX_TYPE: + case JS_ATOMICS_CONDITION_TYPE: return WriteSharedObject(receiver); #if V8_ENABLE_WEBASSEMBLY case WASM_MODULE_OBJECT_TYPE: diff --git a/src/runtime/runtime-test.cc b/src/runtime/runtime-test.cc index e7b3df9549..596d46cd2c 100644 --- a/src/runtime/runtime-test.cc +++ b/src/runtime/runtime-test.cc @@ -23,6 +23,7 @@ #include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop. 
#include "src/heap/heap-write-barrier-inl.h" #include "src/ic/stub-cache.h" +#include "src/objects/js-atomics-synchronization-inl.h" #include "src/objects/js-function-inl.h" #include "src/objects/js-regexp-inl.h" #include "src/objects/smi.h" @@ -1673,5 +1674,12 @@ RUNTIME_FUNCTION(Runtime_SharedGC) { return ReadOnlyRoots(isolate).undefined_value(); } +RUNTIME_FUNCTION(Runtime_AtomicsConditionNumWaitersForTesting) { + HandleScope scope(isolate); + DCHECK_EQ(1, args.length()); + Handle cv = args.at(0); + return cv->NumWaitersForTesting(isolate); +} + } // namespace internal } // namespace v8 diff --git a/src/runtime/runtime-wasm.cc b/src/runtime/runtime-wasm.cc index 0ac1922614..4fc9df75b8 100644 --- a/src/runtime/runtime-wasm.cc +++ b/src/runtime/runtime-wasm.cc @@ -106,8 +106,10 @@ class V8_NODISCARD ClearThreadInWasmScope { Isolate* isolate_; }; -Object ThrowWasmError(Isolate* isolate, MessageTemplate message) { - Handle error_obj = isolate->factory()->NewWasmRuntimeError(message); +Object ThrowWasmError(Isolate* isolate, MessageTemplate message, + Handle arg0 = Handle()) { + Handle error_obj = + isolate->factory()->NewWasmRuntimeError(message, arg0); JSObject::AddProperty(isolate, error_obj, isolate->factory()->wasm_uncatchable_symbol(), isolate->factory()->true_value(), NONE); @@ -370,7 +372,9 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) { // Trap if memory is not shared, or wait is not allowed on the isolate if (!array_buffer->is_shared() || !isolate->allow_atomics_wait()) { - return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed); + return ThrowWasmError( + isolate, MessageTemplate::kAtomicsOperationNotAllowed, + isolate->factory()->NewStringFromAsciiChecked("Atomics.wait")); } return FutexEmulation::WaitWasm32(isolate, array_buffer, offset, expected_value, timeout_ns.AsInt64()); @@ -393,7 +397,9 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) { // Trap if memory is not shared, or if wait is not allowed on the isolate if (!array_buffer->is_shared() || !isolate->allow_atomics_wait()) { - return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed); + return ThrowWasmError( + isolate, MessageTemplate::kAtomicsOperationNotAllowed, + isolate->factory()->NewStringFromAsciiChecked("Atomics.wait")); } return FutexEmulation::WaitWasm64(isolate, array_buffer, offset, expected_value.AsInt64(), diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index 8b8eddb3a7..f44ff7ed8b 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -65,7 +65,8 @@ namespace internal { F(SetAllowAtomicsWait, 1, 1) \ F(AtomicsLoadSharedStructOrArray, 2, 1) \ F(AtomicsStoreSharedStructOrArray, 3, 1) \ - F(AtomicsExchangeSharedStructOrArray, 3, 1) + F(AtomicsExchangeSharedStructOrArray, 3, 1) \ + F(AtomicsConditionNumWaitersForTesting, 1, 1) #define FOR_EACH_INTRINSIC_BIGINT(F, I) \ F(BigIntBinaryOp, 3, 1) \ diff --git a/test/mjsunit/shared-memory/condition-workers.js b/test/mjsunit/shared-memory/condition-workers.js new file mode 100644 index 0000000000..0c21d0a8cc --- /dev/null +++ b/test/mjsunit/shared-memory/condition-workers.js @@ -0,0 +1,43 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+// +// Flags: --harmony-struct --allow-natives-syntax + +"use strict"; + +if (this.Worker) { + +(function TestWait() { + let workerScript = + `onmessage = function(msg) { + let mutex = msg.mutex; + let cv = msg.cv; + let res = Atomics.Mutex.lock(mutex, function() { + return Atomics.Condition.wait(cv, mutex); + }); + postMessage(res); + };`; + + let mutex = new Atomics.Mutex; + let cv = new Atomics.Condition; + let msg = {mutex, cv}; + + let worker1 = new Worker(workerScript, { type: 'string' }); + let worker2 = new Worker(workerScript, { type: 'string' }); + worker1.postMessage(msg); + worker2.postMessage(msg); + + // Spin until both workers are waiting. + while (%AtomicsConditionNumWaitersForTesting(cv) != 2) {} + + assertEquals(2, Atomics.Condition.notify(cv, 2)); + + assertEquals(true, worker1.getMessage()); + assertEquals(true, worker2.getMessage()); + + worker1.terminate(); + worker2.terminate(); +})(); + +} diff --git a/test/mjsunit/shared-memory/condition.js b/test/mjsunit/shared-memory/condition.js new file mode 100644 index 0000000000..8fa0e6e3b5 --- /dev/null +++ b/test/mjsunit/shared-memory/condition.js @@ -0,0 +1,36 @@ +// Copyright 2022 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. +// +// Flags: --harmony-struct --allow-natives-syntax + +let mutex = new Atomics.Mutex; +let cv = new Atomics.Condition; + +(function TestConditionWaitNotAllowed() { + assertThrows(() => { + Atomics.Mutex.lock(mutex, () => { + %SetAllowAtomicsWait(false); + Atomics.Condition.wait(cv, mutex); + }); + }); + %SetAllowAtomicsWait(true); +})(); + +(function TestConditionMutexNotHeld() { + // Cannot wait on a mutex not owned by the current thread. + assertThrows(() => { + Atomics.Condition.wait(cv, mutex); + }); +})(); + +(function TestConditionNoWaiters() { + // Notify returns number of threads woken up. 
+ assertEquals(0, Atomics.Condition.notify(cv)); +})(); + +(function TestConditionWaitTimeout() { + Atomics.Mutex.lock(mutex, () => { + assertEquals(false, Atomics.Condition.wait(cv, mutex, 100)); + }); +})(); diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index a81788a951..929f2513b7 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -415,7 +415,7 @@ v8_source_set("unittests_sources") { "interpreter/source-position-matcher.cc", "interpreter/source-position-matcher.h", "interpreter/source-positions-unittest.cc", - "js-atomics/js-atomics-mutex-unittest.cc", + "js-atomics/js-atomics-synchronization-primitive-unittest.cc", "libplatform/default-job-unittest.cc", "libplatform/default-platform-unittest.cc", "libplatform/default-worker-threads-task-runner-unittest.cc", diff --git a/test/unittests/js-atomics/js-atomics-mutex-unittest.cc b/test/unittests/js-atomics/js-atomics-synchronization-primitive-unittest.cc similarity index 51% rename from test/unittests/js-atomics/js-atomics-mutex-unittest.cc rename to test/unittests/js-atomics/js-atomics-synchronization-primitive-unittest.cc index 4d31db581b..573301fb2d 100644 --- a/test/unittests/js-atomics/js-atomics-mutex-unittest.cc +++ b/test/unittests/js-atomics/js-atomics-synchronization-primitive-unittest.cc @@ -13,6 +13,7 @@ namespace v8 { namespace internal { using JSAtomicsMutexTest = TestWithSharedIsolate; +using JSAtomicsConditionTest = TestWithSharedIsolate; namespace { @@ -36,13 +37,26 @@ class ClientIsolateWithContextWrapper final { v8::Context::Scope context_scope_; }; -class LockingThread final : public v8::base::Thread { +class ParkingThread : public v8::base::Thread { + public: + explicit ParkingThread(const Options& options) : v8::base::Thread(options) {} + + void ParkedJoin(const ParkedScope& scope) { + USE(scope); + Join(); + } + + private: + using base::Thread::Join; +}; + +class LockingThread final : public ParkingThread { public: LockingThread(v8::Isolate* shared_isolate, Handle mutex, ParkingSemaphore* sema_ready, ParkingSemaphore* sema_execute_start, ParkingSemaphore* sema_execute_complete) - : Thread(Options("ThreadWithAtomicsMutex")), + : ParkingThread(Options("LockingThread")), shared_isolate_(shared_isolate), mutex_(mutex), sema_ready_(sema_ready), @@ -66,14 +80,7 @@ class LockingThread final : public v8::base::Thread { sema_execute_complete_->Signal(); } - void ParkedJoin(const ParkedScope& scope) { - USE(scope); - Join(); - } - - protected: - using base::Thread::Join; - + private: v8::Isolate* shared_isolate_; Handle mutex_; ParkingSemaphore* sema_ready_; @@ -125,5 +132,112 @@ TEST_F(JSAtomicsMutexTest, Contention) { EXPECT_FALSE(contended_mutex->IsHeld()); } +namespace { +class WaitOnConditionThread final : public ParkingThread { + public: + WaitOnConditionThread(v8::Isolate* shared_isolate, + Handle mutex, + Handle condition, + uint32_t* waiting_threads_count, + ParkingSemaphore* sema_ready, + ParkingSemaphore* sema_execute_complete) + : ParkingThread(Options("WaitOnConditionThread")), + shared_isolate_(shared_isolate), + mutex_(mutex), + condition_(condition), + waiting_threads_count_(waiting_threads_count), + sema_ready_(sema_ready), + sema_execute_complete_(sema_execute_complete) {} + + void Run() override { + ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_); + Isolate* isolate = client_isolate_wrapper.isolate(); + + sema_ready_->Signal(); + + HandleScope scope(isolate); + JSAtomicsMutex::Lock(isolate, mutex_); + while (keep_waiting) { + 
(*waiting_threads_count_)++; + EXPECT_TRUE(JSAtomicsCondition::WaitFor(isolate, condition_, mutex_, + base::nullopt)); + (*waiting_threads_count_)--; + } + mutex_->Unlock(isolate); + + sema_execute_complete_->Signal(); + } + + bool keep_waiting = true; + + private: + v8::Isolate* shared_isolate_; + Handle mutex_; + Handle condition_; + uint32_t* waiting_threads_count_; + ParkingSemaphore* sema_ready_; + ParkingSemaphore* sema_execute_complete_; +}; +} // namespace + +TEST_F(JSAtomicsConditionTest, NotifyAll) { + if (!IsJSSharedMemorySupported()) return; + + FLAG_harmony_struct = true; + + v8::Isolate* shared_isolate = v8_isolate(); + ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate); + Isolate* client_isolate = client_isolate_wrapper.isolate(); + + constexpr uint32_t kThreads = 32; + + Handle mutex = JSAtomicsMutex::Create(client_isolate); + Handle condition = + JSAtomicsCondition::Create(client_isolate); + + uint32_t waiting_threads_count = 0; + ParkingSemaphore sema_ready(0); + ParkingSemaphore sema_execute_complete(0); + std::vector> threads; + for (uint32_t i = 0; i < kThreads; i++) { + auto thread = std::make_unique( + shared_isolate, mutex, condition, &waiting_threads_count, &sema_ready, + &sema_execute_complete); + CHECK(thread->Start()); + threads.push_back(std::move(thread)); + } + + LocalIsolate* local_isolate = client_isolate->main_thread_local_isolate(); + for (uint32_t i = 0; i < kThreads; i++) { + sema_ready.ParkedWait(local_isolate); + } + + // Wait until all threads are waiting on the condition. + for (;;) { + JSAtomicsMutex::LockGuard lock_guard(client_isolate, mutex); + uint32_t count = waiting_threads_count; + if (count == kThreads) break; + } + + // Wake all the threads up. + for (uint32_t i = 0; i < kThreads; i++) { + threads[i]->keep_waiting = false; + } + EXPECT_EQ(kThreads, + condition->Notify(client_isolate, JSAtomicsCondition::kAllWaiters)); + + for (uint32_t i = 0; i < kThreads; i++) { + sema_execute_complete.ParkedWait(local_isolate); + } + + ParkedScope parked(local_isolate); + for (auto& thread : threads) { + thread->ParkedJoin(parked); + } + + EXPECT_EQ(0U, waiting_threads_count); + EXPECT_FALSE(mutex->IsHeld()); +} + } // namespace internal } // namespace v8 diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py index 82988acf8a..eaedd8f89b 100644 --- a/tools/v8heapconst.py +++ b/tools/v8heapconst.py @@ -224,57 +224,58 @@ INSTANCE_TYPES = { 2098: "JS_ASYNC_GENERATOR_OBJECT_TYPE", 2099: "JS_MAP_TYPE", 2100: "JS_SET_TYPE", - 2101: "JS_WEAK_MAP_TYPE", - 2102: "JS_WEAK_SET_TYPE", - 2103: "JS_ARGUMENTS_OBJECT_TYPE", - 2104: "JS_ARRAY_TYPE", - 2105: "JS_ARRAY_ITERATOR_TYPE", - 2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", - 2107: "JS_ATOMICS_MUTEX_TYPE", - 2108: "JS_COLLATOR_TYPE", - 2109: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", - 2110: "JS_DATE_TYPE", - 2111: "JS_DATE_TIME_FORMAT_TYPE", - 2112: "JS_DISPLAY_NAMES_TYPE", - 2113: "JS_ERROR_TYPE", - 2114: "JS_EXTERNAL_OBJECT_TYPE", - 2115: "JS_FINALIZATION_REGISTRY_TYPE", - 2116: "JS_LIST_FORMAT_TYPE", - 2117: "JS_LOCALE_TYPE", - 2118: "JS_MESSAGE_OBJECT_TYPE", - 2119: "JS_NUMBER_FORMAT_TYPE", - 2120: "JS_PLURAL_RULES_TYPE", - 2121: "JS_REG_EXP_TYPE", - 2122: "JS_REG_EXP_STRING_ITERATOR_TYPE", - 2123: "JS_RELATIVE_TIME_FORMAT_TYPE", - 2124: "JS_SEGMENT_ITERATOR_TYPE", - 2125: "JS_SEGMENTER_TYPE", - 2126: "JS_SEGMENTS_TYPE", - 2127: "JS_SHADOW_REALM_TYPE", - 2128: "JS_SHARED_ARRAY_TYPE", - 2129: "JS_SHARED_STRUCT_TYPE", - 2130: "JS_STRING_ITERATOR_TYPE", - 2131: "JS_TEMPORAL_CALENDAR_TYPE", - 2132: 
"JS_TEMPORAL_DURATION_TYPE", - 2133: "JS_TEMPORAL_INSTANT_TYPE", - 2134: "JS_TEMPORAL_PLAIN_DATE_TYPE", - 2135: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE", - 2136: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE", - 2137: "JS_TEMPORAL_PLAIN_TIME_TYPE", - 2138: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE", - 2139: "JS_TEMPORAL_TIME_ZONE_TYPE", - 2140: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE", - 2141: "JS_V8_BREAK_ITERATOR_TYPE", - 2142: "JS_WEAK_REF_TYPE", - 2143: "WASM_EXCEPTION_PACKAGE_TYPE", - 2144: "WASM_GLOBAL_OBJECT_TYPE", - 2145: "WASM_INSTANCE_OBJECT_TYPE", - 2146: "WASM_MEMORY_OBJECT_TYPE", - 2147: "WASM_MODULE_OBJECT_TYPE", - 2148: "WASM_SUSPENDER_OBJECT_TYPE", - 2149: "WASM_TABLE_OBJECT_TYPE", - 2150: "WASM_TAG_OBJECT_TYPE", - 2151: "WASM_VALUE_OBJECT_TYPE", + 2101: "JS_ATOMICS_CONDITION_TYPE", + 2102: "JS_ATOMICS_MUTEX_TYPE", + 2103: "JS_WEAK_MAP_TYPE", + 2104: "JS_WEAK_SET_TYPE", + 2105: "JS_ARGUMENTS_OBJECT_TYPE", + 2106: "JS_ARRAY_TYPE", + 2107: "JS_ARRAY_ITERATOR_TYPE", + 2108: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE", + 2109: "JS_COLLATOR_TYPE", + 2110: "JS_CONTEXT_EXTENSION_OBJECT_TYPE", + 2111: "JS_DATE_TYPE", + 2112: "JS_DATE_TIME_FORMAT_TYPE", + 2113: "JS_DISPLAY_NAMES_TYPE", + 2114: "JS_ERROR_TYPE", + 2115: "JS_EXTERNAL_OBJECT_TYPE", + 2116: "JS_FINALIZATION_REGISTRY_TYPE", + 2117: "JS_LIST_FORMAT_TYPE", + 2118: "JS_LOCALE_TYPE", + 2119: "JS_MESSAGE_OBJECT_TYPE", + 2120: "JS_NUMBER_FORMAT_TYPE", + 2121: "JS_PLURAL_RULES_TYPE", + 2122: "JS_REG_EXP_TYPE", + 2123: "JS_REG_EXP_STRING_ITERATOR_TYPE", + 2124: "JS_RELATIVE_TIME_FORMAT_TYPE", + 2125: "JS_SEGMENT_ITERATOR_TYPE", + 2126: "JS_SEGMENTER_TYPE", + 2127: "JS_SEGMENTS_TYPE", + 2128: "JS_SHADOW_REALM_TYPE", + 2129: "JS_SHARED_ARRAY_TYPE", + 2130: "JS_SHARED_STRUCT_TYPE", + 2131: "JS_STRING_ITERATOR_TYPE", + 2132: "JS_TEMPORAL_CALENDAR_TYPE", + 2133: "JS_TEMPORAL_DURATION_TYPE", + 2134: "JS_TEMPORAL_INSTANT_TYPE", + 2135: "JS_TEMPORAL_PLAIN_DATE_TYPE", + 2136: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE", + 2137: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE", + 2138: "JS_TEMPORAL_PLAIN_TIME_TYPE", + 2139: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE", + 2140: "JS_TEMPORAL_TIME_ZONE_TYPE", + 2141: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE", + 2142: "JS_V8_BREAK_ITERATOR_TYPE", + 2143: "JS_WEAK_REF_TYPE", + 2144: "WASM_EXCEPTION_PACKAGE_TYPE", + 2145: "WASM_GLOBAL_OBJECT_TYPE", + 2146: "WASM_INSTANCE_OBJECT_TYPE", + 2147: "WASM_MEMORY_OBJECT_TYPE", + 2148: "WASM_MODULE_OBJECT_TYPE", + 2149: "WASM_SUSPENDER_OBJECT_TYPE", + 2150: "WASM_TABLE_OBJECT_TYPE", + 2151: "WASM_TAG_OBJECT_TYPE", + 2152: "WASM_VALUE_OBJECT_TYPE", } # List of known V8 maps. @@ -454,8 +455,8 @@ KNOWN_MAPS = { ("read_only_space", 0x06a9d): (138, "StoreHandler1Map"), ("read_only_space", 0x06ac5): (138, "StoreHandler2Map"), ("read_only_space", 0x06aed): (138, "StoreHandler3Map"), - ("map_space", 0x02139): (2114, "ExternalMap"), - ("map_space", 0x02161): (2118, "JSMessageObjectMap"), + ("map_space", 0x02139): (2115, "ExternalMap"), + ("map_space", 0x02161): (2119, "JSMessageObjectMap"), } # List of known V8 objects.