[*] Harden WaitForMultipleAddressesOr and WaitForMultipleAddressesAnd against AuStopTheWorld thread termination

Reece Wilson 2024-11-29 12:34:42 +00:00
parent d7a27f1a7d
commit 315d35e5ea
3 changed files with 88 additions and 44 deletions

View File

@@ -19,6 +19,7 @@ namespace Aurora::Threading
return gHolderRWLock->AsReadable();
}
// See GetShutdownReadLock comment
void WakeOnAddressHoldContainersAndTheWorld(const AuVoidFunc &func)
{
// We * must * allow ::GetShutdownLock() to be recursive, no matter how the runtime is configured
@@ -32,6 +33,42 @@ namespace Aurora::Threading
for (AU_ITERATE_N(i, kDefaultWaitPerProcess))
{
gProcessWaitables.list[i].Lock();
// Second, nested under each container lock: unlink all wait-multiple heads and send a spurious signal for WaitForMultipleAddressesOr to handle.
// Best case: WaitForMultipleAddressesOr notices the wake is spurious and goes back to sleep.
// Worst case: a thread we intend to kill is now unlinked and gets stuck on its lock while trying to reinsert its head.
{
auto pCurrentHead = gProcessWaitables.list[i].waitList.pTail;
while (pCurrentHead)
{
if (auto pSpecial = pCurrentHead->pSpecial)
{
// pSpecial will remain alive until it's removed by the caller (we own the container lock, so good luck with that)
AuAtomicStore<AuUInt8>(&pCurrentHead->bAlive, 0);
#if defined(WOA_SEMAPHORE_MODE)
pCurrentHead->semaphore->Unlock(1);
#else
pCurrentHead->variable.Signal();
#endif
auto uCount = pSpecial->waitArray.Count<WaitMultipleEntry>();
auto pBase = pSpecial->waitArray.Begin<WaitMultipleEntry>();
for (AU_ITERATE_N(z, uCount))
{
gProcessWaitables.list[i].RemoveEntry<true>(pBase[z].pTargetAddress, pCurrentHead);
}
pCurrentHead = gProcessWaitables.list[i].waitList.pTail;
}
else
{
pCurrentHead = pCurrentHead->pBefore;
}
}
}
}
{
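For context on the waiter side that has to absorb this spurious signal: a minimal sketch, using standard C++ primitives and hypothetical names (SketchWaitEntry, gContainerLock, ReRegisterHeads are illustrations, not the runtime's API). The woken thread sees bAlive cleared, knows its wait-multiple heads were unlinked, and must retake the container lock to relink them, which parks it until the stop-the-world holder lets go.

#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>

// Hypothetical, simplified model of one wait-multiple waiter.
struct SketchWaitEntry
{
    std::mutex mutex;
    std::condition_variable variable;
    std::atomic<std::uint8_t> bAlive { 1 };
};

std::mutex gContainerLock;                    // stands in for the per-container lock held by the stopper

void ReRegisterHeads(SketchWaitEntry &entry)  // hypothetical stub: relink the unlinked heads
{
    entry.bAlive.store(1);
}

void WaitForAnyAddressSketch(SketchWaitEntry &entry, const std::atomic<int> &word, int undesired)
{
    std::unique_lock lock { entry.mutex };
    while (word.load() == undesired)          // the comparison hasn't broken the yield loop yet
    {
        entry.variable.wait(lock);            // expected or spurious wake (e.g. from the stop-the-world path)
        if (!entry.bAlive.load())
        {
            // Our heads were unlinked; re-registering needs the container lock,
            // which the stop-the-world path owns, so we block here until the world resumes.
            std::lock_guard relink { gContainerLock };
            ReRegisterHeads(entry);
        }
    }
}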

View File

@@ -29,17 +29,6 @@
namespace Aurora::Threading
{
struct WaitMulipleContainer;
struct MultipleInternalContext
{
WaitState state;
WaitEntry *pBefore {};
WaitEntry *pNext {};
bool bOldIgnore {};
AuUInt16 uOldStateChangedCounter {};
};
static WaitEntry **GetPBeforeFromContainer(const WaitMulipleContainer *pContainer, const void *pAddress);
static WaitEntry **GetPNextFromContainer(const WaitMulipleContainer *pContainer, const void *pAddress);
static const void *GetPCompareFromContainer(const WaitMulipleContainer *pContainer, const void *pAddress);
@@ -845,11 +834,8 @@ namespace Aurora::Threading
{
if (auto pLoadFromMemory = this->waitList.pHead)
{
if (pLoadFromMemory != pReturn)
{
pLoadFromMemory->SetBefore(pAddress, pReturn);
pReturn->SetNext(pAddress, pLoadFromMemory);
}
pLoadFromMemory->SetBefore(pAddress, pReturn);
pReturn->SetNext(pAddress, pLoadFromMemory);
}
else
{
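The hunk above now links the new entry to the existing head unconditionally instead of skipping the case where the head already equals the entry being inserted. As a rough, hypothetical illustration of the pattern only (SketchEntry and SketchList are simplified stand-ins, not the runtime's per-address WaitEntry links):

// Hypothetical intrusive node; the real WaitEntry tracks pNext/pBefore per address.
struct SketchEntry
{
    SketchEntry *pNext {};
    SketchEntry *pBefore {};
};

struct SketchList
{
    SketchEntry *pHead {};
    SketchEntry *pTail {};

    void InsertAtHead(SketchEntry *pReturn)
    {
        if (auto pLoadFromMemory = this->pHead)
        {
            pLoadFromMemory->pBefore = pReturn;   // old head points back at the new entry
            pReturn->pNext = pLoadFromMemory;     // new entry points forward at the old head
        }
        else
        {
            this->pTail = pReturn;                // empty list: the new entry is also the tail
        }
        this->pHead = pReturn;
    }
};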

View File

@@ -33,31 +33,43 @@ namespace Aurora::Threading
struct WaitBuffer
{
char buffer[32];
AuUInt8 uSize;
WOAFAST static WaitBuffer From(const void *pBuf, AuUInt8 uSize);
WOAFAST static bool Compare(const void *pHotAddress,
                            AuUInt8 uSize,
                            WaitState &state);
WOAFAST static bool Compare(const void *pHotAddress,
                            AuUInt8 uSize,
                            const void *pCompare,
                            AuUInt64 uMask,
                            EWaitMethod eMethod);
// returns false when valid
template <EWaitMethod eMethod, bool bFast = false>
WOAFAST static bool Compare2(const void *pHotAddress,
                             AuUInt8 uSize,
                             const void *pReference,
                             AuUInt64 uMask = 0xFFFFFFFFFFFFFFFF);
template <EWaitMethod eMethod, bool bFast = false>
WOAFAST static bool Compare2(const volatile void *pHotAddress,
                             AuUInt8 uSize,
                             const void *pReference,
                             AuUInt64 uMask = 0xFFFFFFFFFFFFFFFF);
};
struct WaitState
{
WaitBuffer compare;
//AuOptionalEx<AuUInt64> qwNanoseconds;
AuOptionalEx<AuUInt64> qwNanosecondsAbs;
AuUInt64 uDownsizeMask { 0xFFFFFFFFFFFFFFFF };
AuUInt32 uWordSize { };
const void * pCompare2 { };
EWaitMethod eWaitMethod { EWaitMethod::eNotEqual };
};
struct WaitEntry
@@ -65,15 +77,15 @@ namespace Aurora::Threading
WaitEntry();
~WaitEntry();
WaitEntry * volatile pNext {};
WaitEntry * volatile pBefore {};
const WaitMulipleContainer * volatile pSpecial {};
// synch
#if defined(WOA_SEMAPHORE_MODE)
#if !defined(WOA_SEMAPHORE_SEMAPHORE)
Primitives::Semaphore semaphore;
#else
// Recommended for XNU targets:
WOA_SEMAPHORE_SEMAPHORE semaphore;
@@ -86,30 +98,30 @@ namespace Aurora::Threading
// !!! also note: container spinlocks =/= WaitEntry::mutex !!
#if !defined(WOA_CONDVAR_MUTEX)
Primitives::ConditionMutexInternal mutex; // mutex ctor must come before var
Primitives::ConditionVariableInternal variable; // ...and something all 2007+ micro and monolithic kernels should have; an event or semaphore primitive on which we can form a crude condvar
#else
WOA_CONDVAR_MUTEX mutex;
WOA_CONDVAR_VARIABLE variable;
#endif
#endif
// state for the signal side [no multiple]
const void * pAddress {};
AuUInt8 uSize {};
const void * pCompareAddress {};
EWaitMethod eWaitMethod { EWaitMethod::eNotEqual };
// bookkeeping (parent container)
volatile AuUInt8 bAlive {}; // wait entry validity. must be rechecked for each spurious or expected wake, if the comparison doesn't break the yield loop.
// if false, and we're still yielding under pCompare == pAddress, we must reschedule with inverse order (as to steal the next signal, as opposed to waiting last)
void Release();
template <EWaitMethod eMethod>
bool SleepOn(WaitState &state);
bool SleepLossy(AuUInt64 qwNanosecondsAbs);
bool TrySignalAddress(const void *pAddress);
auline WaitEntry *GetNext(const void *pAddress);
auline void SetNext(const void *pAddress, WaitEntry *pNext);
@@ -159,4 +171,13 @@ namespace Aurora::Threading
};
inline ProcessWaitContainer gProcessWaitables;
struct MultipleInternalContext
{
WaitState state;
WaitEntry * pBefore {};
WaitEntry * pNext {};
bool bOldIgnore {};
AuUInt16 uOldStateChangedCounter {};
};
}
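The bAlive comments above define the waiter-side contract: every wake, expected or spurious, must recheck entry validity, and an entry torn down while its thread is still yielding under pCompare == pAddress has to re-register in inverse order so it steals the next signal rather than queueing last again. A minimal sketch of that ordering rule, using a hypothetical SketchQueue (the real implementation uses the intrusive WaitEntry links, not std::deque):

#include <atomic>
#include <cstdint>
#include <deque>

struct SketchWaiter
{
    std::atomic<std::uint8_t> bAlive { 1 };
};

using SketchQueue = std::deque<SketchWaiter *>;   // hypothetical: front = next waiter to be signalled

// A fresh wait queues at the back and is signalled last; a waiter whose entry was
// invalidated (bAlive == 0) while it was still spinning re-registers at the front,
// stealing the next signal instead of waiting behind everyone again.
void RegisterSketch(SketchQueue &queue, SketchWaiter &waiter)
{
    if (!waiter.bAlive.load())
    {
        queue.push_front(&waiter);   // inverse order: steal the next signal
        waiter.bAlive.store(1);      // entry is valid again once re-linked
    }
    else
    {
        queue.push_back(&waiter);    // normal order: wait last
    }
}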