diff --git a/Source/Threading/Primitives/AuConditionVariable.NT.cpp b/Source/Threading/Primitives/AuConditionVariable.NT.cpp
index a890823f..b82f73d6 100644
--- a/Source/Threading/Primitives/AuConditionVariable.NT.cpp
+++ b/Source/Threading/Primitives/AuConditionVariable.NT.cpp
@@ -151,8 +151,9 @@ namespace Aurora::Threading::Primitives
             }
             else
             {
-                if (bIOU ||
-                    (bSpin ? this->CheckOut() : this->CheckOutNoSpin()))
+                // Acquire our signal before spinning and potentially giving up on our timeslice.
+                // If we were awoken, try to acquire before yielding to a contested mutex.
+                if (bIOU || this->CheckOutNoSpin())
                 {
                     pMutex->Lock();
                     return true;
@@ -160,10 +161,11 @@ namespace Aurora::Threading::Primitives
 
                 if (!gUseNativeWaitCondvar)
                 {
+                    // (NT 5-6.1) Unblocks one race condition, where another thread checks out our signal and we return with nothing.
+                    // Normal execution of the blocked thread can continue, so long as we trigger the keyed event as though this thread was signaled.
+                    // This is only relevant in an edge case of multiple waiters spinning, rapid signaling, and the IOU being hit once. Do this instead of ticketing.
                     this->SignalSpuriously();
                 }
-
-                this->AddWaiter();
             }
         }
     }
@@ -211,8 +213,7 @@ namespace Aurora::Threading::Primitives
                     pNtWaitForKeyedEvent(gKeyedEventHandle, (void *)&this->wlist, 0, nullptr);
                 }
 
-                if (bIOU ||
-                    (bSpin ? this->CheckOut() : this->CheckOutNoSpin()))
+                if (bIOU || this->CheckOutNoSpin())
                 {
                     pMutex->Lock();
                     return bRet;
@@ -222,8 +223,6 @@ namespace Aurora::Threading::Primitives
                 {
                     this->SignalSpuriously();
                 }
-
-                this->AddWaiter();
             }
         }
 #else
@@ -344,20 +343,22 @@ namespace Aurora::Threading::Primitives
         if (expected)
         {
-            // NOTE: missing this->signalCount atomic increment to force another AddWaiter under successful keyedevent return
+            // INTENTIONAL: Missing this->signalCount atomic increment to force another AddWaiter under successful keyedevent return
+            while (expected)
             {
-                if (AuAtomicCompareExchange(&this->wlist, ((expected - 1) << kShiftCountByBits) /*intentional clear*/, original) == original)
+                // INTENTIONAL: Removed the -1 so we don't have to rewatch the condvar, potentially missing a signal in the process
+                if (AuAtomicCompareExchange(&this->wlist, (expected << kShiftCountByBits) /*intentional clear*/, original) == original)
                 {
-                    if (gUseNativeWaitCondvar)
-                    {
-                        InternalLTSWakeOne((void *)&this->wlist);
-                    }
-                    else
+                    if (!gUseNativeWaitCondvar)
                     {
                         pNtReleaseKeyedEvent(gKeyedEventHandle, (void *)&this->wlist, FALSE, nullptr);
                     }
+                    // INTENTIONAL: Removal of the modern NT branch. We err on the side of caution by failing awake; then it's up to the last waking
+                    // thread to block again. So long as wlist represents all the waiting threads, it doesn't matter:
+                    // we will continue to be signalable without a signal being lost or any thread blocking.
+                    return;
                 }
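
Reviewer note (not part of the patch): the SignalSpuriously() path above relies on the rendezvous behaviour of NT keyed events, where NtReleaseKeyedEvent blocks until a thread waits on the same key, so an extra release is consumed rather than lost. Below is a minimal standalone sketch of only that primitive. The main() harness, the key variable, and the runtime resolution of the Nt* exports are illustrative assumptions, not Aurora code; the prototypes are the commonly known ntdll signatures that the pNt* pointers in this file resolve.

// Sketch: the keyed-event rendezvous underpinning the pre-Win8 (NT 5-6.1) path.
// NtWaitForKeyedEvent parks the waiter until a matching release on the same key
// arrives, and NtReleaseKeyedEvent parks the releaser until a waiter shows up,
// so neither side can drop a wake.
#include <windows.h>
#include <winternl.h>
#include <cstdio>
#include <thread>

using NtCreateKeyedEvent_t  = NTSTATUS (NTAPI *)(PHANDLE, ACCESS_MASK, POBJECT_ATTRIBUTES, ULONG);
using NtWaitForKeyedEvent_t = NTSTATUS (NTAPI *)(HANDLE, PVOID, BOOLEAN, PLARGE_INTEGER);
using NtReleaseKeyedEvent_t = NTSTATUS (NTAPI *)(HANDLE, PVOID, BOOLEAN, PLARGE_INTEGER);

int main()
{
    HMODULE hNtDll = ::GetModuleHandleW(L"ntdll.dll");
    auto pCreate  = reinterpret_cast<NtCreateKeyedEvent_t>(::GetProcAddress(hNtDll, "NtCreateKeyedEvent"));
    auto pWait    = reinterpret_cast<NtWaitForKeyedEvent_t>(::GetProcAddress(hNtDll, "NtWaitForKeyedEvent"));
    auto pRelease = reinterpret_cast<NtReleaseKeyedEvent_t>(::GetProcAddress(hNtDll, "NtReleaseKeyedEvent"));
    if (!pCreate || !pWait || !pRelease)
    {
        return 1;
    }

    HANDLE hKeyedEvent {};
    pCreate(&hKeyedEvent, EVENT_ALL_ACCESS, nullptr, 0);

    // Keys must have the low bit clear; the real code uses the address of this->wlist.
    alignas(sizeof(void *)) static unsigned key {};

    std::thread waiter([&]
    {
        // Parks until a release on &key arrives: the slow path a waiter falls into
        // after losing the CheckOutNoSpin() race.
        pWait(hKeyedEvent, &key, FALSE, nullptr);
        std::puts("waiter: paired with a release; re-check the signal state");
    });

    // The releaser also blocks until the rendezvous completes, so this wake cannot be
    // lost even if the waiter has not reached NtWaitForKeyedEvent yet.
    pRelease(hKeyedEvent, &key, FALSE, nullptr);

    waiter.join();
    ::CloseHandle(hKeyedEvent);
    return 0;
}

Because the release side blocks until it is consumed, the extra SignalSpuriously() release cannot evaporate; at worst a waiter wakes, re-checks its signal, and blocks again, which is the same "fail awake" trade described in the comments of the hunk at -344.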