Reland "[shared-struct] Add Atomics.Condition"

This is a reland of commit e2066ff6bf

Changes since revert:
- Rebased against c991852491, which
  uses the external pointer table for the WaiterQueueNode stored
  in the state field when pointer compression is enabled. This
  relaxes the alignment requirement of the state field to 4 bytes
  under pointer compression.
- Moved the state field into the JSSynchronizationPrimitive base
  class, since alignment and padding can now be made simpler.
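
For context, a minimal sketch of the API surface this CL exposes (shape
taken from the builtins and mjsunit tests below; illustrative only, and
assumes a d8 shell with --harmony-struct):

let mutex = new Atomics.Mutex;
let cv = new Atomics.Condition;

// wait must be called while holding the mutex: it atomically releases
// the mutex, blocks, and reacquires the mutex before returning. It
// returns false if the optional timeout (in milliseconds) expires
// before a notify, true otherwise.
Atomics.Mutex.lock(mutex, () => {
  let notified = Atomics.Condition.wait(cv, mutex, 100);
  // notified === false here, since nothing notifies within 100ms.
});

// notify wakes up to {count} waiters (all waiters if the count is
// omitted) and returns the number of threads actually woken; 0 here.
Atomics.Condition.notify(cv, 1);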

Original change's description:
> [shared-struct] Add Atomics.Condition
>
> Bug: v8:12547
> Change-Id: Id439aef9cab3348171a23378cdd47ede5f4d7288
> Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_rel_ng,v8_linux64_tsan_rel_ng
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3630350
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Reviewed-by: Adam Klein <adamk@chromium.org>
> Commit-Queue: Shu-yu Guo <syg@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#81734}

Bug: v8:12547
Change-Id: I638304c3d5722c64bd04708ed4cf84863cdebb81
Cq-Include-Trybots: luci.v8.try:v8_linux_arm64_rel_ng,v8_linux64_tsan_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3763787
Reviewed-by: Adam Klein <adamk@chromium.org>
Commit-Queue: Shu-yu Guo <syg@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82278}
Author: Shu-yu Guo, 2022-08-08 16:19:43 -07:00 (committed by V8 LUCI CQ)
parent 7f62066e42
commit b1020a4345
29 changed files with 793 additions and 124 deletions

@@ -29,8 +29,8 @@ BUILTIN(AtomicsMutexLock) {
Handle<JSAtomicsMutex> js_mutex = Handle<JSAtomicsMutex>::cast(js_mutex_obj);
Handle<Object> run_under_lock = args.atOrUndefined(isolate, 2);
if (!run_under_lock->IsCallable()) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotCallable));
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotCallable, run_under_lock));
}
// Like Atomics.wait, synchronous locking may block, and so is disallowed on
@@ -39,7 +39,9 @@ BUILTIN(AtomicsMutexLock) {
// This is not a recursive lock, so also throw if recursively locking.
if (!isolate->allow_atomics_wait() || js_mutex->IsCurrentThreadOwner()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kAtomicsMutexLockNotAllowed));
isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
Handle<Object> result;
@@ -69,8 +71,8 @@ BUILTIN(AtomicsMutexTryLock) {
Handle<JSAtomicsMutex> js_mutex = Handle<JSAtomicsMutex>::cast(js_mutex_obj);
Handle<Object> run_under_lock = args.atOrUndefined(isolate, 2);
if (!run_under_lock->IsCallable()) {
THROW_NEW_ERROR_RETURN_FAILURE(isolate,
NewTypeError(MessageTemplate::kNotCallable));
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kNotCallable, run_under_lock));
}
JSAtomicsMutex::TryLockGuard try_lock_guard(isolate, js_mutex);
@@ -86,5 +88,96 @@ BUILTIN(AtomicsMutexTryLock) {
return ReadOnlyRoots(isolate).false_value();
}
BUILTIN(AtomicsConditionConstructor) {
DCHECK(FLAG_harmony_struct);
HandleScope scope(isolate);
return *JSAtomicsCondition::Create(isolate);
}
BUILTIN(AtomicsConditionWait) {
DCHECK(FLAG_harmony_struct);
constexpr char method_name[] = "Atomics.Condition.wait";
HandleScope scope(isolate);
Handle<Object> js_condition_obj = args.atOrUndefined(isolate, 1);
Handle<Object> js_mutex_obj = args.atOrUndefined(isolate, 2);
Handle<Object> timeout_obj = args.atOrUndefined(isolate, 3);
if (!js_condition_obj->IsJSAtomicsCondition() ||
!js_mutex_obj->IsJSAtomicsMutex()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
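// Convert the timeout. If it stays base::nullopt (undefined, NaN, or out
// of int64 range), wait indefinitely; negative finite values clamp to a
// zero timeout.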
base::Optional<base::TimeDelta> timeout = base::nullopt;
if (!timeout_obj->IsUndefined(isolate)) {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, timeout_obj,
Object::ToNumber(isolate, timeout_obj));
double ms = timeout_obj->Number();
if (!std::isnan(ms)) {
if (ms < 0) ms = 0;
if (ms <= static_cast<double>(std::numeric_limits<int64_t>::max())) {
timeout = base::TimeDelta::FromMilliseconds(static_cast<int64_t>(ms));
}
}
}
if (!isolate->allow_atomics_wait()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
Handle<JSAtomicsCondition> js_condition =
Handle<JSAtomicsCondition>::cast(js_condition_obj);
Handle<JSAtomicsMutex> js_mutex = Handle<JSAtomicsMutex>::cast(js_mutex_obj);
if (!js_mutex->IsCurrentThreadOwner()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate,
NewTypeError(MessageTemplate::kAtomicsMutexNotOwnedByCurrentThread));
}
return isolate->heap()->ToBoolean(
JSAtomicsCondition::WaitFor(isolate, js_condition, js_mutex, timeout));
}
BUILTIN(AtomicsConditionNotify) {
DCHECK(FLAG_harmony_struct);
constexpr char method_name[] = "Atomics.Condition.notify";
HandleScope scope(isolate);
Handle<Object> js_condition_obj = args.atOrUndefined(isolate, 1);
Handle<Object> count_obj = args.atOrUndefined(isolate, 2);
if (!js_condition_obj->IsJSAtomicsCondition()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kMethodInvokedOnWrongType,
isolate->factory()->NewStringFromAsciiChecked(
method_name)));
}
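// An undefined count notifies all waiters; otherwise the count is clamped
// to the range [0, kAllWaiters].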
uint32_t count;
if (count_obj->IsUndefined(isolate)) {
count = JSAtomicsCondition::kAllWaiters;
} else {
ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, count_obj,
Object::ToInteger(isolate, count_obj));
double count_double = count_obj->Number();
if (count_double < 0) {
count_double = 0;
} else if (count_double > JSAtomicsCondition::kAllWaiters) {
count_double = JSAtomicsCondition::kAllWaiters;
}
count = static_cast<uint32_t>(count_double);
}
Handle<JSAtomicsCondition> js_condition =
Handle<JSAtomicsCondition>::cast(js_condition_obj);
return *isolate->factory()->NewNumberFromUint(
js_condition->Notify(isolate, count));
}
} // namespace internal
} // namespace v8
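
As a sketch of the argument coercion implemented by the wait and notify
builtins above (semantics read off this hunk; again assumes d8 with
--harmony-struct):

let mutex = new Atomics.Mutex;
let cv = new Atomics.Condition;

Atomics.Mutex.lock(mutex, () => {
  // A negative timeout clamps to 0ms, so this returns false right away;
  // an undefined or NaN timeout would instead wait indefinitely.
  Atomics.Condition.wait(cv, mutex, -5);  // false
});

// An omitted count means "notify all waiters"; negative counts clamp to
// 0. With no waiters, notify returns 0 regardless of the count.
Atomics.Condition.notify(cv);      // 0
Atomics.Condition.notify(cv, -1);  // 0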

@@ -1005,6 +1005,9 @@ namespace internal {
CPP(AtomicsMutexConstructor) \
CPP(AtomicsMutexLock) \
CPP(AtomicsMutexTryLock) \
CPP(AtomicsConditionConstructor) \
CPP(AtomicsConditionWait) \
CPP(AtomicsConditionNotify) \
\
/* AsyncGenerator */ \
\

@@ -231,7 +231,9 @@ Object DoWait(Isolate* isolate, FutexEmulation::WaitMode mode,
if (mode == FutexEmulation::WaitMode::kSync &&
!isolate->allow_atomics_wait()) {
THROW_NEW_ERROR_RETURN_FAILURE(
isolate, NewTypeError(MessageTemplate::kAtomicsWaitNotAllowed));
isolate, NewTypeError(MessageTemplate::kAtomicsOperationNotAllowed,
isolate->factory()->NewStringFromAsciiChecked(
"Atomics.wait")));
}
Handle<JSArrayBuffer> array_buffer = sta->GetBuffer();

@@ -41,9 +41,9 @@ namespace internal {
T(AwaitNotInDebugEvaluate, \
"await can not be used when evaluating code " \
"while paused in the debugger") \
T(AtomicsMutexLockNotAllowed, \
"Atomics.Mutex.lock cannot be called in this context") \
T(AtomicsWaitNotAllowed, "Atomics.wait cannot be called in this context") \
T(AtomicsMutexNotOwnedByCurrentThread, \
"Atomics.Mutex is not owned by the current agent") \
T(AtomicsOperationNotAllowed, "% cannot be called in this context") \
T(BadRoundingType, "RoundingType is not fractionDigits") \
T(BadSortComparisonFunction, \
"The comparison function must be either a function or undefined") \

@@ -264,6 +264,7 @@ Type::bitset BitsetType::Lub(const MapRefLike& map) {
case JS_SHADOW_REALM_TYPE:
case JS_SHARED_ARRAY_TYPE:
case JS_SHARED_STRUCT_TYPE:
case JS_ATOMICS_CONDITION_TYPE:
case JS_ATOMICS_MUTEX_TYPE:
case JS_TEMPORAL_CALENDAR_TYPE:
case JS_TEMPORAL_DURATION_TYPE:

@@ -551,7 +551,8 @@ void Map::MapVerify(Isolate* isolate) {
JSObject::GetEmbedderFieldCount(*this) * kEmbedderDataSlotSize,
inobject_fields_start_offset);
if (IsJSSharedStructMap() || IsJSSharedArrayMap()) {
if (IsJSSharedStructMap() || IsJSSharedArrayMap() || IsJSAtomicsMutex() ||
IsJSAtomicsCondition()) {
CHECK(InSharedHeap());
CHECK(GetBackPointer().IsUndefined(isolate));
Object maybe_cell = prototype_validity_cell();
@@ -1265,10 +1266,12 @@ void JSAtomicsMutex::JSAtomicsMutexVerify(Isolate* isolate) {
CHECK(IsJSAtomicsMutex());
CHECK(InSharedWritableHeap());
JSObjectVerify(isolate);
Map mutex_map = map();
CHECK(mutex_map.GetBackPointer().IsUndefined(isolate));
CHECK(!mutex_map.is_extensible());
CHECK(!mutex_map.is_prototype_map());
}
void JSAtomicsCondition::JSAtomicsConditionVerify(Isolate* isolate) {
CHECK(IsJSAtomicsCondition());
CHECK(InSharedHeap());
JSObjectVerify(isolate);
}
void JSSharedArray::JSSharedArrayVerify(Isolate* isolate) {

@@ -1495,6 +1495,15 @@ void JSAtomicsMutex::JSAtomicsMutexPrint(std::ostream& os) {
JSObjectPrintBody(os, *this);
}
void JSAtomicsCondition::JSAtomicsConditionPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSAtomicsCondition");
Isolate* isolate = GetIsolateFromWritableObject(*this);
os << "\n - isolate: " << isolate;
if (isolate->is_shared()) os << " (shared)";
os << "\n - state: " << this->state();
JSObjectPrintBody(os, *this);
}
void JSWeakMap::JSWeakMapPrint(std::ostream& os) {
JSObjectPrintHeader(os, *this, "JSWeakMap");
os << "\n - table: " << Brief(table());

@@ -31,12 +31,12 @@ namespace internal {
V(FeedbackMetadata) \
V(FixedDoubleArray) \
V(JSArrayBuffer) \
V(JSAtomicsMutex) \
V(JSDataView) \
V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
V(JSSynchronizationPrimitive) \
V(JSTypedArray) \
V(WeakCell) \
V(JSWeakCollection) \

@@ -4663,8 +4663,10 @@ void Genesis::InitializeGlobal_harmony_struct() {
DONT_ENUM);
}
// TODO(v8:12547): Make a single canonical copy of the Mutex and Condition
// maps.
{ // Atomics.Mutex
// TODO(syg): Make a single canonical copy of the map.
Handle<String> mutex_str =
isolate()->factory()->InternalizeUtf8String("Mutex");
Handle<JSFunction> mutex_fun = CreateSharedObjectConstructor(
@@ -4683,6 +4685,27 @@ void Genesis::InitializeGlobal_harmony_struct() {
SimpleInstallFunction(isolate(), mutex_fun, "tryLock",
Builtin::kAtomicsMutexTryLock, 2, true);
}
{ // Atomics.Condition
Handle<String> condition_str =
isolate()->factory()->InternalizeUtf8String("Condition");
Handle<JSFunction> condition_fun = CreateSharedObjectConstructor(
isolate(), condition_str, JS_ATOMICS_CONDITION_TYPE,
JSAtomicsCondition::kHeaderSize, TERMINAL_FAST_ELEMENTS_KIND,
Builtin::kAtomicsConditionConstructor);
condition_fun->shared().set_internal_formal_parameter_count(
JSParameterCount(0));
condition_fun->shared().set_length(0);
native_context()->set_js_atomics_condition_map(
condition_fun->initial_map());
JSObject::AddProperty(isolate(), isolate()->atomics_object(), condition_str,
condition_fun, DONT_ENUM);
SimpleInstallFunction(isolate(), condition_fun, "wait",
Builtin::kAtomicsConditionWait, 2, false);
SimpleInstallFunction(isolate(), condition_fun, "notify",
Builtin::kAtomicsConditionNotify, 2, false);
}
}
void Genesis::InitializeGlobal_harmony_array_find_last() {

@@ -178,6 +178,7 @@ enum ContextLookupFlags {
js_array_packed_double_elements_map) \
V(JS_ARRAY_HOLEY_DOUBLE_ELEMENTS_MAP_INDEX, Map, \
js_array_holey_double_elements_map) \
V(JS_ATOMICS_CONDITION_MAP, Map, js_atomics_condition_map) \
V(JS_ATOMICS_MUTEX_MAP, Map, js_atomics_mutex_map) \
V(JS_MAP_FUN_INDEX, JSFunction, js_map_fun) \
V(JS_MAP_MAP_INDEX, Map, js_map_map) \

@@ -19,6 +19,15 @@ namespace internal {
#include "torque-generated/src/objects/js-atomics-synchronization-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSynchronizationPrimitive)
std::atomic<JSSynchronizationPrimitive::StateT>*
JSSynchronizationPrimitive::AtomicStatePtr() {
StateT* state_ptr = reinterpret_cast<StateT*>(field_address(kStateOffset));
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(state_ptr), sizeof(StateT)));
return base::AsAtomicPtr(state_ptr);
}
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsMutex)
CAST_ACCESSOR(JSAtomicsMutex)
@@ -111,18 +120,16 @@ void JSAtomicsMutex::ClearOwnerThread() {
std::memory_order_relaxed);
}
std::atomic<JSAtomicsMutex::StateT>* JSAtomicsMutex::AtomicStatePtr() {
StateT* state_ptr = reinterpret_cast<StateT*>(field_address(kStateOffset));
DCHECK(IsAligned(reinterpret_cast<uintptr_t>(state_ptr), sizeof(StateT)));
return base::AsAtomicPtr(state_ptr);
}
std::atomic<int32_t>* JSAtomicsMutex::AtomicOwnerThreadIdPtr() {
int32_t* owner_thread_id_ptr =
reinterpret_cast<int32_t*>(field_address(kOwnerThreadIdOffset));
return base::AsAtomicPtr(owner_thread_id_ptr);
}
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAtomicsCondition)
CAST_ACCESSOR(JSAtomicsCondition)
} // namespace internal
} // namespace v8

@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/condition-variable.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/time.h"
#include "src/base/platform/yield-processor.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/parked-scope.h"
@@ -51,7 +52,7 @@ class V8_NODISCARD WaiterQueueNode final {
static_cast<ExternalPointerHandle>(state & T::kWaiterQueueHeadMask);
if (handle == 0) return nullptr;
// The external pointer is cleared after decoding to prevent reuse by
// multiple mutexes in case of heap corruption.
// multiple synchronization primitives in case of heap corruption.
return reinterpret_cast<WaiterQueueNode*>(
requester->shared_external_pointer_table().Exchange(
handle, kNullAddress, kWaiterQueueNodeTag));
@@ -63,6 +64,7 @@ class V8_NODISCARD WaiterQueueNode final {
// Enqueues {new_tail}, mutating {head} to be the new head.
static void Enqueue(WaiterQueueNode** head, WaiterQueueNode* new_tail) {
DCHECK_NOT_NULL(head);
new_tail->VerifyNotInList();
WaiterQueueNode* current_head = *head;
if (current_head == nullptr) {
new_tail->next_ = new_tail;
@@ -77,8 +79,7 @@
}
}
// Dequeues a waiter and returns it; {head} is mutated to be the new
// head.
// Dequeues a waiter and returns it, mutating {head} to be the new head.
static WaiterQueueNode* Dequeue(WaiterQueueNode** head) {
DCHECK_NOT_NULL(head);
DCHECK_NOT_NULL(*head);
@@ -92,9 +93,54 @@
tail->next_ = new_head;
*head = new_head;
}
current_head->SetNotInListForVerification();
return current_head;
}
// Splits off at most {count} nodes from the front of the waiter list into
// their own list and returns it, mutating {head} to be the head of the
// back (remaining) list.
static WaiterQueueNode* Split(WaiterQueueNode** head, uint32_t count) {
DCHECK_GT(count, 0);
DCHECK_NOT_NULL(head);
DCHECK_NOT_NULL(*head);
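// The waiter list is circular and doubly linked, so advancing next_ from
// the head eventually wraps back around to it.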
WaiterQueueNode* front_head = *head;
WaiterQueueNode* back_head = front_head;
uint32_t actual_count = 0;
while (actual_count < count) {
back_head = back_head->next_;
// The queue is shorter than the requested count; return the whole queue.
if (back_head == front_head) {
*head = nullptr;
return front_head;
}
actual_count++;
}
WaiterQueueNode* front_tail = back_head->prev_;
WaiterQueueNode* back_tail = front_head->prev_;
// Fix up the back list (i.e. remainder of the list).
back_head->prev_ = back_tail;
back_tail->next_ = back_head;
*head = back_head;
// Fix up and return the front list (i.e. the dequeued list).
front_head->prev_ = front_tail;
front_tail->next_ = front_head;
return front_head;
}
// This method must be called with a known waiter queue head. Incorrectly
// encoded lists can cause this method to loop infinitely.
static int LengthFromHead(WaiterQueueNode* head) {
WaiterQueueNode* cur = head;
int len = 0;
do {
len++;
cur = cur->next_;
} while (cur != head);
return len;
}
void Wait(Isolate* requester) {
AllowGarbageCollection allow_before_parking;
ParkedScope parked_scope(requester->main_thread_local_heap());
@@ -104,15 +150,57 @@
}
}
// Returns false if timed out, true otherwise.
bool WaitFor(Isolate* requester, const base::TimeDelta& rel_time) {
AllowGarbageCollection allow_before_parking;
ParkedScope parked_scope(requester->main_thread_local_heap());
base::MutexGuard guard(&wait_lock_);
base::TimeTicks current_time = base::TimeTicks::Now();
base::TimeTicks timeout_time = current_time + rel_time;
for (;;) {
if (!should_wait) return true;
current_time = base::TimeTicks::Now();
if (current_time >= timeout_time) return false;
base::TimeDelta time_until_timeout = timeout_time - current_time;
bool wait_res = wait_cond_var_.WaitFor(&wait_lock_, time_until_timeout);
USE(wait_res);
// The wake up may have been spurious, so loop again.
}
}
void Notify() {
base::MutexGuard guard(&wait_lock_);
should_wait = false;
wait_cond_var_.NotifyOne();
SetNotInListForVerification();
}
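// Notifies every node in the circular list that this node is a part of,
// returning the number of nodes woken.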
uint32_t NotifyAllInList() {
WaiterQueueNode* cur = this;
uint32_t count = 0;
do {
WaiterQueueNode* next = cur->next_;
cur->Notify();
cur = next;
count++;
} while (cur != this);
return count;
}
bool should_wait = false;
private:
void VerifyNotInList() {
DCHECK_NULL(next_);
DCHECK_NULL(prev_);
}
void SetNotInListForVerification() {
#ifdef DEBUG
next_ = prev_ = nullptr;
#endif
}
// The queue wraps around, i.e. the head's prev is the tail, and the tail's
// next is the head.
WaiterQueueNode* next_ = nullptr;
@@ -187,7 +275,7 @@ void JSAtomicsMutex::LockSlowPath(Isolate* requester,
// put the requester thread on the waiter queue.
// Allocate a waiter queue node on-stack, since this thread is going to
// sleep and will be blocked anyaway.
// sleep and will be blocked anyway.
WaiterQueueNode this_waiter;
{
@@ -267,5 +355,157 @@ void JSAtomicsMutex::UnlockSlowPath(Isolate* requester,
old_head->Notify();
}
// static
Handle<JSAtomicsCondition> JSAtomicsCondition::Create(Isolate* isolate) {
auto* factory = isolate->factory();
Handle<Map> map = isolate->js_atomics_condition_map();
Handle<JSAtomicsCondition> cond = Handle<JSAtomicsCondition>::cast(
factory->NewJSObjectFromMap(map, AllocationType::kSharedOld));
cond->set_state(kEmptyState);
return cond;
}
bool JSAtomicsCondition::TryLockWaiterQueueExplicit(std::atomic<StateT>* state,
StateT& expected) {
// Try to acquire the queue lock.
expected &= ~kIsWaiterQueueLockedBit;
return state->compare_exchange_weak(
expected, expected | kIsWaiterQueueLockedBit, std::memory_order_acquire,
std::memory_order_relaxed);
}
// static
bool JSAtomicsCondition::WaitFor(Isolate* requester,
Handle<JSAtomicsCondition> cv,
Handle<JSAtomicsMutex> mutex,
base::Optional<base::TimeDelta> timeout) {
DisallowGarbageCollection no_gc;
// Allocate a waiter queue node on-stack, since this thread is going to sleep
// and will be blocked anyway.
WaiterQueueNode this_waiter;
{
// The state pointer should not be used outside of this block as a shared GC
// may reallocate it after waiting.
std::atomic<StateT>* state = cv->AtomicStatePtr();
// Try to acquire the queue lock, which is itself a spinlock.
StateT current_state = state->load(std::memory_order_relaxed);
while (!cv->TryLockWaiterQueueExplicit(state, current_state)) {
YIELD_PROCESSOR;
}
// With the queue lock held, enqueue the requester onto the waiter queue.
this_waiter.should_wait = true;
WaiterQueueNode* waiter_head =
WaiterQueueNode::DestructivelyDecodeHead<JSAtomicsCondition>(
requester, current_state);
WaiterQueueNode::Enqueue(&waiter_head, &this_waiter);
// Release the queue lock and install the new waiter queue head by creating
// a new state.
DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
StateT new_state =
WaiterQueueNode::EncodeHead<JSAtomicsCondition>(requester, waiter_head);
state->store(new_state, std::memory_order_release);
}
// Release the mutex and wait for another thread to wake us up, reacquiring
// the mutex upon wakeup.
mutex->Unlock(requester);
bool rv;
if (timeout) {
rv = this_waiter.WaitFor(requester, *timeout);
} else {
this_waiter.Wait(requester);
rv = true;
}
JSAtomicsMutex::Lock(requester, mutex);
return rv;
}
uint32_t JSAtomicsCondition::Notify(Isolate* requester, uint32_t count) {
std::atomic<StateT>* state = AtomicStatePtr();
// To wake a sleeping thread, first acquire the queue lock, which is itself a
// spinlock.
StateT current_state = state->load(std::memory_order_relaxed);
// There are no waiters.
if (current_state == kEmptyState) return 0;
while (!TryLockWaiterQueueExplicit(state, current_state)) {
YIELD_PROCESSOR;
}
// Get the waiter queue head.
WaiterQueueNode* waiter_head =
WaiterQueueNode::DestructivelyDecodeHead<JSAtomicsCondition>(
requester, current_state);
// There's no waiter to wake up; release the queue lock by setting it to
// the empty state.
if (waiter_head == nullptr) {
DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
state->store(kEmptyState, std::memory_order_release);
return 0;
}
WaiterQueueNode* old_head;
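// Either dequeue a single waiter, detach the entire list, or split off
// the first {count} waiters.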
if (count == 1) {
old_head = WaiterQueueNode::Dequeue(&waiter_head);
} else if (count == kAllWaiters) {
old_head = waiter_head;
waiter_head = nullptr;
} else {
old_head = WaiterQueueNode::Split(&waiter_head, count);
}
// Release the queue lock and install the new waiter queue head by creating a
// new state.
DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
StateT new_state =
WaiterQueueNode::EncodeHead<JSAtomicsCondition>(requester, waiter_head);
state->store(new_state, std::memory_order_release);
// Notify the waiters.
if (count == 1) {
old_head->Notify();
return 1;
}
return old_head->NotifyAllInList();
}
Object JSAtomicsCondition::NumWaitersForTesting(Isolate* isolate) {
DisallowGarbageCollection no_gc;
std::atomic<StateT>* state = AtomicStatePtr();
StateT current_state = state->load(std::memory_order_relaxed);
// There are no waiters.
if (current_state == kEmptyState) return Smi::FromInt(0);
int num_waiters;
{
// Take the queue lock.
while (!TryLockWaiterQueueExplicit(state, current_state)) {
YIELD_PROCESSOR;
}
// Get the waiter queue head.
WaiterQueueNode* waiter_head =
WaiterQueueNode::DestructivelyDecodeHead<JSAtomicsCondition>(
isolate, current_state);
num_waiters = WaiterQueueNode::LengthFromHead(waiter_head);
// Release the queue lock and reinstall the same queue head by creating a
// new state.
DCHECK_EQ(state->load(), current_state | kIsWaiterQueueLockedBit);
StateT new_state =
WaiterQueueNode::EncodeHead<JSAtomicsCondition>(isolate, waiter_head);
state->store(new_state, std::memory_order_release);
}
return Smi::FromInt(num_waiters);
}
} // namespace internal
} // namespace v8

@@ -7,6 +7,7 @@
#include <atomic>
#include "src/base/platform/time.h"
#include "src/execution/thread-id.h"
#include "src/objects/js-objects.h"
@@ -22,12 +23,38 @@ namespace detail {
class WaiterQueueNode;
} // namespace detail
// Base class for JSAtomicsMutex and JSAtomicsCondition
class JSSynchronizationPrimitive
: public TorqueGeneratedJSSynchronizationPrimitive<
JSSynchronizationPrimitive, JSObject> {
public:
// Synchronization primitives only store raw (untagged) data as state.
static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(JSSynchronizationPrimitive)
protected:
#ifdef V8_COMPRESS_POINTERS
using StateT = uint32_t;
static_assert(sizeof(StateT) == sizeof(ExternalPointerHandle));
#else
using StateT = uintptr_t;
#endif // V8_COMPRESS_POINTERS
inline std::atomic<StateT>* AtomicStatePtr();
using TorqueGeneratedJSSynchronizationPrimitive<JSSynchronizationPrimitive,
JSObject>::state;
using TorqueGeneratedJSSynchronizationPrimitive<JSSynchronizationPrimitive,
JSObject>::set_state;
};
// A non-recursive mutex that is exposed to JS.
//
// It has the following properties:
// - Slim: 8-12 bytes. Lock state is 4 bytes when
// V8_SANDBOXED_EXTERNAL_POINTERS, and sizeof(void*) otherwise. Owner
// thread is an additional 4 bytes.
// - Slim: 8-12 bytes. Lock state is 4 bytes when V8_COMPRESS_POINTERS, and
// sizeof(void*) otherwise. Owner thread is an additional 4 bytes.
// - Fast when uncontended: a single weak CAS.
// - Possibly unfair under contention.
// - Moving GC safe. It uses an index into the shared Isolate's external
@@ -41,7 +68,8 @@ class WaiterQueueNode;
// it implements a futex in userland. The algorithm is inspired by WebKit's
// ParkingLot.
class JSAtomicsMutex
: public TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject> {
: public TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
JSSynchronizationPrimitive> {
public:
// A non-copyable wrapper class that provides an RAII-style mechanism for
// owning the JSAtomicsMutex.
@@ -96,9 +124,6 @@ class JSAtomicsMutex
inline bool IsHeld();
inline bool IsCurrentThreadOwner();
static constexpr int kEndOfTaggedFieldsOffset = JSObject::kHeaderSize;
class BodyDescriptor;
TQ_OBJECT_CONSTRUCTORS(JSAtomicsMutex)
private:
@@ -110,13 +135,6 @@ class JSAtomicsMutex
static constexpr int kIsWaiterQueueLockedBit = 1 << 1;
static constexpr int kLockBitsSize = 2;
#ifdef V8_COMPRESS_POINTERS
using StateT = uint32_t;
static_assert(sizeof(StateT) == sizeof(ExternalPointerHandle));
#else
using StateT = uintptr_t;
#endif
static constexpr StateT kUnlocked = 0;
static constexpr StateT kLockedUncontended = 1;
@@ -126,7 +144,6 @@ class JSAtomicsMutex
inline void SetCurrentThreadAsOwner();
inline void ClearOwnerThread();
inline std::atomic<StateT>* AtomicStatePtr();
inline std::atomic<int32_t>* AtomicOwnerThreadIdPtr();
bool TryLockExplicit(std::atomic<StateT>* state, StateT& expected);
@@ -138,12 +155,59 @@ class JSAtomicsMutex
V8_EXPORT_PRIVATE void UnlockSlowPath(Isolate* requester,
std::atomic<StateT>* state);
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject>::state;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex, JSObject>::set_state;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
JSObject>::owner_thread_id;
using TorqueGeneratedJSAtomicsMutex<JSAtomicsMutex,
JSObject>::set_owner_thread_id;
using TorqueGeneratedJSAtomicsMutex<
JSAtomicsMutex, JSSynchronizationPrimitive>::owner_thread_id;
using TorqueGeneratedJSAtomicsMutex<
JSAtomicsMutex, JSSynchronizationPrimitive>::set_owner_thread_id;
};
// A condition variable that is exposed to JS.
//
// It has the following properties:
// - Slim: 4-8 bytes. Lock state is 4 bytes when V8_COMPRESS_POINTERS, and
// sizeof(void*) otherwise.
// - Moving GC safe. It uses an index into the shared Isolate's external
// pointer table to store a queue of sleeping threads.
// - Parks the main thread LocalHeap when waiting. Unparks the main thread
// LocalHeap after waking up.
//
// This condition variable manages its own queue of waiting threads, like
// JSAtomicsMutex. The algorithm is inspired by WebKit's ParkingLot.
class JSAtomicsCondition
: public TorqueGeneratedJSAtomicsCondition<JSAtomicsCondition,
JSSynchronizationPrimitive> {
public:
DECL_CAST(JSAtomicsCondition)
DECL_PRINTER(JSAtomicsCondition)
EXPORT_DECL_VERIFIER(JSAtomicsCondition)
V8_EXPORT_PRIVATE static Handle<JSAtomicsCondition> Create(Isolate* isolate);
V8_EXPORT_PRIVATE static bool WaitFor(
Isolate* requester, Handle<JSAtomicsCondition> cv,
Handle<JSAtomicsMutex> mutex, base::Optional<base::TimeDelta> timeout);
static constexpr uint32_t kAllWaiters = UINT32_MAX;
// Notify {count} waiters. Returns the number of waiters woken up.
V8_EXPORT_PRIVATE uint32_t Notify(Isolate* requester, uint32_t count);
Object NumWaitersForTesting(Isolate* isolate);
TQ_OBJECT_CONSTRUCTORS(JSAtomicsCondition)
private:
friend class detail::WaiterQueueNode;
// There is 1 lock bit: whether the waiter queue is locked.
static constexpr int kIsWaiterQueueLockedBit = 1 << 0;
static constexpr int kLockBitsSize = 1;
static constexpr StateT kEmptyState = 0;
static constexpr StateT kLockBitsMask = (1 << kLockBitsSize) - 1;
static constexpr StateT kWaiterQueueHeadMask = ~kLockBitsMask;
bool TryLockWaiterQueueExplicit(std::atomic<StateT>* state, StateT& expected);
};
} // namespace internal

@@ -2,12 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
extern class JSAtomicsMutex extends JSObject {
@abstract
extern class JSSynchronizationPrimitive extends JSObject {
@if(TAGGED_SIZE_8_BYTES) state: uintptr;
@ifnot(TAGGED_SIZE_8_BYTES) state: uint32;
}
extern class JSAtomicsMutex extends JSSynchronizationPrimitive {
owner_thread_id: int32;
@if(TAGGED_SIZE_8_BYTES) optional_padding: uint32;
@ifnot(TAGGED_SIZE_8_BYTES) optional_padding: void;
}
extern class JSAtomicsCondition extends JSSynchronizationPrimitive {}

@@ -2474,6 +2474,8 @@ int JSObject::GetHeaderSize(InstanceType type,
return JSSharedStruct::kHeaderSize;
case JS_ATOMICS_MUTEX_TYPE:
return JSAtomicsMutex::kHeaderSize;
case JS_ATOMICS_CONDITION_TYPE:
return JSAtomicsCondition::kHeaderSize;
case JS_TEMPORAL_CALENDAR_TYPE:
return JSTemporalCalendar::kHeaderSize;
case JS_TEMPORAL_DURATION_TYPE:

@@ -324,6 +324,7 @@ VisitorId Map::GetVisitorId(Map map) {
#endif // V8_ENABLE_WEBASSEMBLY
case JS_BOUND_FUNCTION_TYPE:
case JS_WRAPPED_FUNCTION_TYPE: {
// Is GetEmbedderFieldCount(map) > 0 for Atomics.Mutex?
const bool has_raw_data_fields =
COMPRESS_POINTERS_BOOL && JSObject::GetEmbedderFieldCount(map) > 0;
return has_raw_data_fields ? kVisitJSObject : kVisitJSObjectFast;
@@ -337,15 +338,16 @@ VisitorId Map::GetVisitorId(Map map) {
case JS_WEAK_REF_TYPE:
return kVisitJSWeakRef;
case JS_ATOMICS_MUTEX_TYPE:
return kVisitJSAtomicsMutex;
case WEAK_CELL_TYPE:
return kVisitWeakCell;
case JS_FINALIZATION_REGISTRY_TYPE:
return kVisitJSFinalizationRegistry;
case JS_ATOMICS_MUTEX_TYPE:
case JS_ATOMICS_CONDITION_TYPE:
return kVisitJSSynchronizationPrimitive;
case FILLER_TYPE:
case FOREIGN_TYPE:
case HEAP_NUMBER_TYPE:

@@ -47,13 +47,13 @@ enum InstanceType : uint16_t;
V(FreeSpace) \
V(JSApiObject) \
V(JSArrayBuffer) \
V(JSAtomicsMutex) \
V(JSDataView) \
V(JSExternalObject) \
V(JSFinalizationRegistry) \
V(JSFunction) \
V(JSObject) \
V(JSObjectFast) \
V(JSSynchronizationPrimitive) \
V(JSTypedArray) \
V(JSWeakRef) \
V(JSWeakCollection) \

@@ -132,6 +132,7 @@ class ZoneForwardList;
V(JSAsyncFromSyncIterator) \
V(JSAsyncFunctionObject) \
V(JSAsyncGeneratorObject) \
V(JSAtomicsCondition) \
V(JSAtomicsMutex) \
V(JSBoundFunction) \
V(JSCollection) \
@@ -167,6 +168,7 @@ class ZoneForwardList;
V(JSSharedStruct) \
V(JSSpecialObject) \
V(JSStringIterator) \
V(JSSynchronizationPrimitive) \
V(JSTemporalCalendar) \
V(JSTemporalDuration) \
V(JSTemporalInstant) \

@@ -668,7 +668,8 @@ class JSWeakCollection::BodyDescriptorImpl final : public BodyDescriptorBase {
}
};
class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase {
class JSSynchronizationPrimitive::BodyDescriptor final
: public BodyDescriptorBase {
public:
static bool IsValidSlot(Map map, HeapObject obj, int offset) {
if (offset < kEndOfTaggedFieldsOffset) return true;
@@ -680,7 +681,6 @@ class JSAtomicsMutex::BodyDescriptor final : public BodyDescriptorBase {
static inline void IterateBody(Map map, HeapObject obj, int object_size,
ObjectVisitor* v) {
IteratePointers(obj, kPropertiesOrHashOffset, kEndOfTaggedFieldsOffset, v);
IterateJSObjectBodyImpl(map, obj, kHeaderSize, object_size, v);
}
static inline int SizeOf(Map map, HeapObject object) {
@@ -1309,7 +1309,8 @@ auto BodyDescriptorApply(InstanceType type, Args&&... args) {
case JS_PROXY_TYPE:
return CALL_APPLY(JSProxy);
case JS_ATOMICS_MUTEX_TYPE:
return CALL_APPLY(JSAtomicsMutex);
case JS_ATOMICS_CONDITION_TYPE:
return CALL_APPLY(JSSynchronizationPrimitive);
case FOREIGN_TYPE:
return CALL_APPLY(Foreign);
case MAP_TYPE:

@@ -1188,6 +1188,7 @@ bool Object::IsShared() const {
case JS_SHARED_ARRAY_TYPE:
case JS_SHARED_STRUCT_TYPE:
case JS_ATOMICS_MUTEX_TYPE:
case JS_ATOMICS_CONDITION_TYPE:
DCHECK(object.InSharedHeap());
return true;
case INTERNALIZED_STRING_TYPE:

@@ -615,6 +615,7 @@ Maybe<bool> ValueSerializer::WriteJSReceiver(Handle<JSReceiver> receiver) {
case JS_SHARED_STRUCT_TYPE:
return WriteJSSharedStruct(Handle<JSSharedStruct>::cast(receiver));
case JS_ATOMICS_MUTEX_TYPE:
case JS_ATOMICS_CONDITION_TYPE:
return WriteSharedObject(receiver);
#if V8_ENABLE_WEBASSEMBLY
case WASM_MODULE_OBJECT_TYPE:

@@ -23,6 +23,7 @@
#include "src/heap/heap-inl.h" // For ToBoolean. TODO(jkummerow): Drop.
#include "src/heap/heap-write-barrier-inl.h"
#include "src/ic/stub-cache.h"
#include "src/objects/js-atomics-synchronization-inl.h"
#include "src/objects/js-function-inl.h"
#include "src/objects/js-regexp-inl.h"
#include "src/objects/smi.h"
@@ -1673,5 +1674,12 @@ RUNTIME_FUNCTION(Runtime_SharedGC) {
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_AtomicsConditionNumWaitersForTesting) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
Handle<JSAtomicsCondition> cv = args.at<JSAtomicsCondition>(0);
return cv->NumWaitersForTesting(isolate);
}
} // namespace internal
} // namespace v8

@@ -106,8 +106,10 @@ class V8_NODISCARD ClearThreadInWasmScope {
Isolate* isolate_;
};
Object ThrowWasmError(Isolate* isolate, MessageTemplate message) {
Handle<JSObject> error_obj = isolate->factory()->NewWasmRuntimeError(message);
Object ThrowWasmError(Isolate* isolate, MessageTemplate message,
Handle<Object> arg0 = Handle<Object>()) {
Handle<JSObject> error_obj =
isolate->factory()->NewWasmRuntimeError(message, arg0);
JSObject::AddProperty(isolate, error_obj,
isolate->factory()->wasm_uncatchable_symbol(),
isolate->factory()->true_value(), NONE);
@@ -370,7 +372,9 @@ RUNTIME_FUNCTION(Runtime_WasmI32AtomicWait) {
// Trap if memory is not shared, or wait is not allowed on the isolate
if (!array_buffer->is_shared() || !isolate->allow_atomics_wait()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
return ThrowWasmError(
isolate, MessageTemplate::kAtomicsOperationNotAllowed,
isolate->factory()->NewStringFromAsciiChecked("Atomics.wait"));
}
return FutexEmulation::WaitWasm32(isolate, array_buffer, offset,
expected_value, timeout_ns.AsInt64());
@@ -393,7 +397,9 @@ RUNTIME_FUNCTION(Runtime_WasmI64AtomicWait) {
// Trap if memory is not shared, or if wait is not allowed on the isolate
if (!array_buffer->is_shared() || !isolate->allow_atomics_wait()) {
return ThrowWasmError(isolate, MessageTemplate::kAtomicsWaitNotAllowed);
return ThrowWasmError(
isolate, MessageTemplate::kAtomicsOperationNotAllowed,
isolate->factory()->NewStringFromAsciiChecked("Atomics.wait"));
}
return FutexEmulation::WaitWasm64(isolate, array_buffer, offset,
expected_value.AsInt64(),

@@ -65,7 +65,8 @@ namespace internal {
F(SetAllowAtomicsWait, 1, 1) \
F(AtomicsLoadSharedStructOrArray, 2, 1) \
F(AtomicsStoreSharedStructOrArray, 3, 1) \
F(AtomicsExchangeSharedStructOrArray, 3, 1)
F(AtomicsExchangeSharedStructOrArray, 3, 1) \
F(AtomicsConditionNumWaitersForTesting, 1, 1)
#define FOR_EACH_INTRINSIC_BIGINT(F, I) \
F(BigIntBinaryOp, 3, 1) \

@@ -0,0 +1,43 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --harmony-struct --allow-natives-syntax
"use strict";
if (this.Worker) {
(function TestWait() {
let workerScript =
`onmessage = function(msg) {
let mutex = msg.mutex;
let cv = msg.cv;
let res = Atomics.Mutex.lock(mutex, function() {
return Atomics.Condition.wait(cv, mutex);
});
postMessage(res);
};`;
let mutex = new Atomics.Mutex;
let cv = new Atomics.Condition;
let msg = {mutex, cv};
let worker1 = new Worker(workerScript, { type: 'string' });
let worker2 = new Worker(workerScript, { type: 'string' });
worker1.postMessage(msg);
worker2.postMessage(msg);
// Spin until both workers are waiting.
while (%AtomicsConditionNumWaitersForTesting(cv) != 2) {}
assertEquals(2, Atomics.Condition.notify(cv, 2));
assertEquals(true, worker1.getMessage());
assertEquals(true, worker2.getMessage());
worker1.terminate();
worker2.terminate();
})();
}

@@ -0,0 +1,36 @@
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Flags: --harmony-struct --allow-natives-syntax
let mutex = new Atomics.Mutex;
let cv = new Atomics.Condition;
(function TestConditionWaitNotAllowed() {
assertThrows(() => {
Atomics.Mutex.lock(mutex, () => {
%SetAllowAtomicsWait(false);
Atomics.Condition.wait(cv, mutex);
});
});
%SetAllowAtomicsWait(true);
})();
(function TestConditionMutexNotHeld() {
// Cannot wait on a mutex not owned by the current thread.
assertThrows(() => {
Atomics.Condition.wait(cv, mutex);
});
})();
(function TestConditionNoWaiters() {
// Notify returns number of threads woken up.
assertEquals(0, Atomics.Condition.notify(cv));
})();
(function TestConditionWaitTimeout() {
Atomics.Mutex.lock(mutex, () => {
assertEquals(false, Atomics.Condition.wait(cv, mutex, 100));
});
})();

@@ -415,7 +415,7 @@ v8_source_set("unittests_sources") {
"interpreter/source-position-matcher.cc",
"interpreter/source-position-matcher.h",
"interpreter/source-positions-unittest.cc",
"js-atomics/js-atomics-mutex-unittest.cc",
"js-atomics/js-atomics-synchronization-primitive-unittest.cc",
"libplatform/default-job-unittest.cc",
"libplatform/default-platform-unittest.cc",
"libplatform/default-worker-threads-task-runner-unittest.cc",

@@ -13,6 +13,7 @@ namespace v8 {
namespace internal {
using JSAtomicsMutexTest = TestWithSharedIsolate;
using JSAtomicsConditionTest = TestWithSharedIsolate;
namespace {
@@ -36,13 +37,26 @@ class ClientIsolateWithContextWrapper final {
v8::Context::Scope context_scope_;
};
class LockingThread final : public v8::base::Thread {
class ParkingThread : public v8::base::Thread {
public:
explicit ParkingThread(const Options& options) : v8::base::Thread(options) {}
void ParkedJoin(const ParkedScope& scope) {
USE(scope);
Join();
}
private:
using base::Thread::Join;
};
class LockingThread final : public ParkingThread {
public:
LockingThread(v8::Isolate* shared_isolate, Handle<JSAtomicsMutex> mutex,
ParkingSemaphore* sema_ready,
ParkingSemaphore* sema_execute_start,
ParkingSemaphore* sema_execute_complete)
: Thread(Options("ThreadWithAtomicsMutex")),
: ParkingThread(Options("LockingThread")),
shared_isolate_(shared_isolate),
mutex_(mutex),
sema_ready_(sema_ready),
@@ -66,14 +80,7 @@ class LockingThread final : public v8::base::Thread {
sema_execute_complete_->Signal();
}
void ParkedJoin(const ParkedScope& scope) {
USE(scope);
Join();
}
protected:
using base::Thread::Join;
private:
v8::Isolate* shared_isolate_;
Handle<JSAtomicsMutex> mutex_;
ParkingSemaphore* sema_ready_;
@@ -125,5 +132,112 @@ TEST_F(JSAtomicsMutexTest, Contention) {
EXPECT_FALSE(contended_mutex->IsHeld());
}
namespace {
class WaitOnConditionThread final : public ParkingThread {
public:
WaitOnConditionThread(v8::Isolate* shared_isolate,
Handle<JSAtomicsMutex> mutex,
Handle<JSAtomicsCondition> condition,
uint32_t* waiting_threads_count,
ParkingSemaphore* sema_ready,
ParkingSemaphore* sema_execute_complete)
: ParkingThread(Options("WaitOnConditionThread")),
shared_isolate_(shared_isolate),
mutex_(mutex),
condition_(condition),
waiting_threads_count_(waiting_threads_count),
sema_ready_(sema_ready),
sema_execute_complete_(sema_execute_complete) {}
void Run() override {
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate_);
Isolate* isolate = client_isolate_wrapper.isolate();
sema_ready_->Signal();
HandleScope scope(isolate);
JSAtomicsMutex::Lock(isolate, mutex_);
while (keep_waiting) {
(*waiting_threads_count_)++;
EXPECT_TRUE(JSAtomicsCondition::WaitFor(isolate, condition_, mutex_,
base::nullopt));
(*waiting_threads_count_)--;
}
mutex_->Unlock(isolate);
sema_execute_complete_->Signal();
}
bool keep_waiting = true;
private:
v8::Isolate* shared_isolate_;
Handle<JSAtomicsMutex> mutex_;
Handle<JSAtomicsCondition> condition_;
uint32_t* waiting_threads_count_;
ParkingSemaphore* sema_ready_;
ParkingSemaphore* sema_execute_complete_;
};
} // namespace
TEST_F(JSAtomicsConditionTest, NotifyAll) {
if (!IsJSSharedMemorySupported()) return;
FLAG_harmony_struct = true;
v8::Isolate* shared_isolate = v8_isolate();
ClientIsolateWithContextWrapper client_isolate_wrapper(shared_isolate);
Isolate* client_isolate = client_isolate_wrapper.isolate();
constexpr uint32_t kThreads = 32;
Handle<JSAtomicsMutex> mutex = JSAtomicsMutex::Create(client_isolate);
Handle<JSAtomicsCondition> condition =
JSAtomicsCondition::Create(client_isolate);
uint32_t waiting_threads_count = 0;
ParkingSemaphore sema_ready(0);
ParkingSemaphore sema_execute_complete(0);
std::vector<std::unique_ptr<WaitOnConditionThread>> threads;
for (uint32_t i = 0; i < kThreads; i++) {
auto thread = std::make_unique<WaitOnConditionThread>(
shared_isolate, mutex, condition, &waiting_threads_count, &sema_ready,
&sema_execute_complete);
CHECK(thread->Start());
threads.push_back(std::move(thread));
}
LocalIsolate* local_isolate = client_isolate->main_thread_local_isolate();
for (uint32_t i = 0; i < kThreads; i++) {
sema_ready.ParkedWait(local_isolate);
}
// Wait until all threads are waiting on the condition.
for (;;) {
JSAtomicsMutex::LockGuard lock_guard(client_isolate, mutex);
uint32_t count = waiting_threads_count;
if (count == kThreads) break;
}
// Wake all the threads up.
for (uint32_t i = 0; i < kThreads; i++) {
threads[i]->keep_waiting = false;
}
EXPECT_EQ(kThreads,
condition->Notify(client_isolate, JSAtomicsCondition::kAllWaiters));
for (uint32_t i = 0; i < kThreads; i++) {
sema_execute_complete.ParkedWait(local_isolate);
}
ParkedScope parked(local_isolate);
for (auto& thread : threads) {
thread->ParkedJoin(parked);
}
EXPECT_EQ(0U, waiting_threads_count);
EXPECT_FALSE(mutex->IsHeld());
}
} // namespace internal
} // namespace v8

@@ -224,57 +224,58 @@ INSTANCE_TYPES = {
2098: "JS_ASYNC_GENERATOR_OBJECT_TYPE",
2099: "JS_MAP_TYPE",
2100: "JS_SET_TYPE",
2101: "JS_WEAK_MAP_TYPE",
2102: "JS_WEAK_SET_TYPE",
2103: "JS_ARGUMENTS_OBJECT_TYPE",
2104: "JS_ARRAY_TYPE",
2105: "JS_ARRAY_ITERATOR_TYPE",
2106: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
2107: "JS_ATOMICS_MUTEX_TYPE",
2108: "JS_COLLATOR_TYPE",
2109: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
2110: "JS_DATE_TYPE",
2111: "JS_DATE_TIME_FORMAT_TYPE",
2112: "JS_DISPLAY_NAMES_TYPE",
2113: "JS_ERROR_TYPE",
2114: "JS_EXTERNAL_OBJECT_TYPE",
2115: "JS_FINALIZATION_REGISTRY_TYPE",
2116: "JS_LIST_FORMAT_TYPE",
2117: "JS_LOCALE_TYPE",
2118: "JS_MESSAGE_OBJECT_TYPE",
2119: "JS_NUMBER_FORMAT_TYPE",
2120: "JS_PLURAL_RULES_TYPE",
2121: "JS_REG_EXP_TYPE",
2122: "JS_REG_EXP_STRING_ITERATOR_TYPE",
2123: "JS_RELATIVE_TIME_FORMAT_TYPE",
2124: "JS_SEGMENT_ITERATOR_TYPE",
2125: "JS_SEGMENTER_TYPE",
2126: "JS_SEGMENTS_TYPE",
2127: "JS_SHADOW_REALM_TYPE",
2128: "JS_SHARED_ARRAY_TYPE",
2129: "JS_SHARED_STRUCT_TYPE",
2130: "JS_STRING_ITERATOR_TYPE",
2131: "JS_TEMPORAL_CALENDAR_TYPE",
2132: "JS_TEMPORAL_DURATION_TYPE",
2133: "JS_TEMPORAL_INSTANT_TYPE",
2134: "JS_TEMPORAL_PLAIN_DATE_TYPE",
2135: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
2136: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
2137: "JS_TEMPORAL_PLAIN_TIME_TYPE",
2138: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
2139: "JS_TEMPORAL_TIME_ZONE_TYPE",
2140: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
2141: "JS_V8_BREAK_ITERATOR_TYPE",
2142: "JS_WEAK_REF_TYPE",
2143: "WASM_EXCEPTION_PACKAGE_TYPE",
2144: "WASM_GLOBAL_OBJECT_TYPE",
2145: "WASM_INSTANCE_OBJECT_TYPE",
2146: "WASM_MEMORY_OBJECT_TYPE",
2147: "WASM_MODULE_OBJECT_TYPE",
2148: "WASM_SUSPENDER_OBJECT_TYPE",
2149: "WASM_TABLE_OBJECT_TYPE",
2150: "WASM_TAG_OBJECT_TYPE",
2151: "WASM_VALUE_OBJECT_TYPE",
2101: "JS_ATOMICS_CONDITION_TYPE",
2102: "JS_ATOMICS_MUTEX_TYPE",
2103: "JS_WEAK_MAP_TYPE",
2104: "JS_WEAK_SET_TYPE",
2105: "JS_ARGUMENTS_OBJECT_TYPE",
2106: "JS_ARRAY_TYPE",
2107: "JS_ARRAY_ITERATOR_TYPE",
2108: "JS_ASYNC_FROM_SYNC_ITERATOR_TYPE",
2109: "JS_COLLATOR_TYPE",
2110: "JS_CONTEXT_EXTENSION_OBJECT_TYPE",
2111: "JS_DATE_TYPE",
2112: "JS_DATE_TIME_FORMAT_TYPE",
2113: "JS_DISPLAY_NAMES_TYPE",
2114: "JS_ERROR_TYPE",
2115: "JS_EXTERNAL_OBJECT_TYPE",
2116: "JS_FINALIZATION_REGISTRY_TYPE",
2117: "JS_LIST_FORMAT_TYPE",
2118: "JS_LOCALE_TYPE",
2119: "JS_MESSAGE_OBJECT_TYPE",
2120: "JS_NUMBER_FORMAT_TYPE",
2121: "JS_PLURAL_RULES_TYPE",
2122: "JS_REG_EXP_TYPE",
2123: "JS_REG_EXP_STRING_ITERATOR_TYPE",
2124: "JS_RELATIVE_TIME_FORMAT_TYPE",
2125: "JS_SEGMENT_ITERATOR_TYPE",
2126: "JS_SEGMENTER_TYPE",
2127: "JS_SEGMENTS_TYPE",
2128: "JS_SHADOW_REALM_TYPE",
2129: "JS_SHARED_ARRAY_TYPE",
2130: "JS_SHARED_STRUCT_TYPE",
2131: "JS_STRING_ITERATOR_TYPE",
2132: "JS_TEMPORAL_CALENDAR_TYPE",
2133: "JS_TEMPORAL_DURATION_TYPE",
2134: "JS_TEMPORAL_INSTANT_TYPE",
2135: "JS_TEMPORAL_PLAIN_DATE_TYPE",
2136: "JS_TEMPORAL_PLAIN_DATE_TIME_TYPE",
2137: "JS_TEMPORAL_PLAIN_MONTH_DAY_TYPE",
2138: "JS_TEMPORAL_PLAIN_TIME_TYPE",
2139: "JS_TEMPORAL_PLAIN_YEAR_MONTH_TYPE",
2140: "JS_TEMPORAL_TIME_ZONE_TYPE",
2141: "JS_TEMPORAL_ZONED_DATE_TIME_TYPE",
2142: "JS_V8_BREAK_ITERATOR_TYPE",
2143: "JS_WEAK_REF_TYPE",
2144: "WASM_EXCEPTION_PACKAGE_TYPE",
2145: "WASM_GLOBAL_OBJECT_TYPE",
2146: "WASM_INSTANCE_OBJECT_TYPE",
2147: "WASM_MEMORY_OBJECT_TYPE",
2148: "WASM_MODULE_OBJECT_TYPE",
2149: "WASM_SUSPENDER_OBJECT_TYPE",
2150: "WASM_TABLE_OBJECT_TYPE",
2151: "WASM_TAG_OBJECT_TYPE",
2152: "WASM_VALUE_OBJECT_TYPE",
}
# List of known V8 maps.
@@ -454,8 +455,8 @@ KNOWN_MAPS = {
("read_only_space", 0x06a9d): (138, "StoreHandler1Map"),
("read_only_space", 0x06ac5): (138, "StoreHandler2Map"),
("read_only_space", 0x06aed): (138, "StoreHandler3Map"),
("map_space", 0x02139): (2114, "ExternalMap"),
("map_space", 0x02161): (2118, "JSMessageObjectMap"),
("map_space", 0x02139): (2115, "ExternalMap"),
("map_space", 0x02161): (2119, "JSMessageObjectMap"),
}
# List of known V8 objects.