diff --git a/Include/Aurora/RuntimeAliases.hpp b/Include/Aurora/RuntimeAliases.hpp
index acf0b9ad..125696c5 100644
--- a/Include/Aurora/RuntimeAliases.hpp
+++ b/Include/Aurora/RuntimeAliases.hpp
@@ -91,6 +91,14 @@
 using AuBinarySemaphore = AuThreadPrimitives::Event;
 using AuEvent = AuThreadPrimitives::Event;
 using AuFutexMutex = AuThreading::Waitables::FutexWaitable;
+using AuFutexMutexSpecial = AuThreading::Waitables::FutexWaitableNoVTblMovable;
+// Note: a U16/U8 reference impl would be pointless - threefold
+// 1) x86_64 chokes on 16bit atomics;
+// 2) 32bit RISC ISAs would prefer 32bit words;
+// 3) sub 32bit words are just going to junk up our alignment & introduce pointless padding
+using AuFutexMutexSmallest = AuThreading::Waitables::FutexWaitableNoVTblMovableSmallest;
+// TODO:
+//using AuFutexTicketMutex = AuThreading::Waitables::FutexTicketWaitable;
 using AuFutexSemaphore = AuThreading::Waitables::FutexSemaphoreWaitable;
 using AuFutexCond = AuThreading::Waitables::FutexCondWaitable;
 using AuFutexCondition = AuThreading::Waitables::FutexCondWaitable;
diff --git a/Include/Aurora/Threading/Waitables/FutexWaitable.hpp b/Include/Aurora/Threading/Waitables/FutexWaitable.hpp
index dc114cd8..28868936 100644
--- a/Include/Aurora/Threading/Waitables/FutexWaitable.hpp
+++ b/Include/Aurora/Threading/Waitables/FutexWaitable.hpp
@@ -153,4 +153,255 @@ namespace Aurora::Threading::Waitables
         AuAUInt32 uAtomicState {};
         AuAUInt32 uAtomicSleeping {};
     };
+
+    struct FutexWaitableNoVTblMovable final
+    {
+        AU_COPY_MOVE_DEF(FutexWaitableNoVTblMovable);
+
+        inline bool TryLockNoSpin()
+        {
+            return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
+        }
+
+        inline bool TryLock()
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLockNoSpin())
+            {
+                return true;
+            }
+
+            return TryWaitOnAddressEx((const void *)&this->uAtomicState,
+                                      &kRef,
+                                      sizeof(kRef),
+                                      [&](const void *pTargetAddress,
+                                          const void *pCompareAddress,
+                                          AuUInt8 uWordSize)
+            {
+                return this->TryLockNoSpin();
+            });
+        }
+
+        inline void Unlock()
+        {
+            AuAtomicClearU8Lock(&this->uAtomicState);
+
+            if (auto uSleeping = AuAtomicLoad(&this->uAtomicSleeping))
+            {
+                WakeOnAddress((const void *)&this->uAtomicState);
+            }
+        }
+
+        inline void Lock()
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLock())
+            {
+                return;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                AuAtomicAdd(&this->uAtomicSleeping, 1u);
+                WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
+                AuAtomicSub(&this->uAtomicSleeping, 1u);
+            }
+        }
+
+        inline bool LockMS(AuUInt64 qwTimeout)
+        {
+            return LockNS(AuMSToNS(qwTimeout));
+        }
+
+        inline bool LockAbsMS(AuUInt64 qwTimeout)
+        {
+            return LockAbsNS(AuMSToNS(qwTimeout));
+        }
+
+        inline bool LockNS(AuUInt64 qwTimeout)
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLockNoSpin())
+            {
+                return true;
+            }
+
+            auto qwEndTime = Time::SteadyClockNS() + qwTimeout;
+
+            if (TryLock())
+            {
+                return true;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                bool bStatus {};
+
+                AuAtomicAdd(&this->uAtomicSleeping, 1u);
+                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
+                AuAtomicSub(&this->uAtomicSleeping, 1u);
+
+                if (!bStatus)
+                {
+                    return TryLockNoSpin();
+                }
+            }
+
+            return true;
+        }
+
+        inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLock())
+            {
+                return true;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                bool bStatus {};
+
+                AuAtomicAdd(&this->uAtomicSleeping, 1u);
+                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
+                AuAtomicSub(&this->uAtomicSleeping, 1u);
+
+                if (!bStatus)
+                {
+                    return TryLockNoSpin();
+                }
+            }
+
+            return true;
+        }
+
+        AuAUInt32 uAtomicState {};
+        AuAUInt32 uAtomicSleeping {};
+    };
+
+    struct FutexWaitableNoVTblMovableSmallest final
+    {
+        AU_COPY_MOVE_DEF(FutexWaitableNoVTblMovableSmallest);
+
+        inline bool TryLockNoSpin()
+        {
+            return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
+        }
+
+        inline bool TryLock()
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLockNoSpin())
+            {
+                return true;
+            }
+
+            return TryWaitOnAddressEx((const void *)&this->uAtomicState,
+                                      &kRef,
+                                      sizeof(kRef),
+                                      [&](const void *pTargetAddress,
+                                          const void *pCompareAddress,
+                                          AuUInt8 uWordSize)
+            {
+                return this->TryLockNoSpin();
+            });
+        }
+
+        inline void Unlock()
+        {
+            AuAtomicClearU8Lock(&this->uAtomicState);
+            WakeOnAddress((const void *)&this->uAtomicState);
+        }
+
+        inline void Lock()
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLock())
+            {
+                return;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
+            }
+        }
+
+        inline bool LockMS(AuUInt64 qwTimeout)
+        {
+            return LockNS(AuMSToNS(qwTimeout));
+        }
+
+        inline bool LockAbsMS(AuUInt64 qwTimeout)
+        {
+            return LockAbsNS(AuMSToNS(qwTimeout));
+        }
+
+        inline bool LockNS(AuUInt64 qwTimeout)
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLockNoSpin())
+            {
+                return true;
+            }
+
+            auto qwEndTime = Time::SteadyClockNS() + qwTimeout;
+
+            if (TryLock())
+            {
+                return true;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                bool bStatus {};
+
+                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
+
+                if (!bStatus)
+                {
+                    return TryLockNoSpin();
+                }
+            }
+
+            return true;
+        }
+
+        inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
+        {
+            static const AuUInt32 kRef { 1 };
+
+            if (TryLock())
+            {
+                return true;
+            }
+
+            while (!TryLockNoSpin())
+            {
+                bool bStatus {};
+
+                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
+
+                if (!bStatus)
+                {
+                    return TryLockNoSpin();
+                }
+            }
+
+            return true;
+        }
+
+        // Note: a U16/U8 reference impl would be pointless - threefold
+        // 1) x86_64 chokes on 16bit atomics;
+        // 2) 32bit RISC ISAs would prefer 32bit words;
+        // 3) sub 32bit words are just going to junk up our alignment & introduce pointless padding
+        AuAUInt32 uAtomicState {};
+    };
 }
\ No newline at end of file
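
Usage sketch (not part of the patch): a minimal illustration of how the new movable, vtable-free waitables could be driven through the AuFutexMutexSpecial alias added above. The SharedCounter type and its Bump/TryBumpFor helpers are hypothetical names used only for illustration; Lock/Unlock/LockMS are the members introduced by this diff, and the alias is assumed to be visible via the project's usual headers.

    // Hypothetical example type; assumes Aurora/RuntimeAliases.hpp is already
    // pulled in through the project's umbrella header.
    struct SharedCounter
    {
        AuFutexMutexSpecial mutex;   // 8 bytes of futex state, no vtable, movable
        AuUInt64 uValue {};

        void Bump()
        {
            // Uncontended path is a single atomic test-and-set; the contended
            // path sleeps on the futex word and Unlock() wakes a waiter only
            // when one is recorded in uAtomicSleeping.
            this->mutex.Lock();
            this->uValue++;
            this->mutex.Unlock();
        }

        bool TryBumpFor(AuUInt64 uTimeoutMs)
        {
            // Timed acquire: LockMS() converts to nanoseconds and waits against
            // a steady-clock deadline; returns false if the lock never became free.
            if (!this->mutex.LockMS(uTimeoutMs))
            {
                return false;
            }
            this->uValue++;
            this->mutex.Unlock();
            return true;
        }
    };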