[+] (reference futex primitive) AuFutexMutexSpecial = AuThreading::Waitables::FutexWaitableNoVTblMovable

[+] (reference futex primitive) AuFutexMutexSmallest = AuThreading::Waitables::FutexWaitableNoVTblMovableSmallest

...because I can't be arsed
Reece Wilson 2024-04-16 04:21:42 +01:00
parent 7cb9355805
commit 62917318af
2 changed files with 259 additions and 0 deletions


@@ -91,6 +91,14 @@ using AuBinarySemaphore = AuThreadPrimitives::Event;
using AuEvent = AuThreadPrimitives::Event;
using AuFutexMutex = AuThreading::Waitables::FutexWaitable;
using AuFutexMutexSpecial = AuThreading::Waitables::FutexWaitableNoVTblMovable;
// Note: a U16/U8 reference impl would be pointless - threefold
// 1) x86_64 chokes on 16bit atomics;
// 2) 32bit RISC ISAs would prefer 32bit words;
// 3) sub 32bit words are just going to junk up our alignment & introduce pointless padding
using AuFutexMutexSmallest = AuThreading::Waitables::FutexWaitableNoVTblMovableSmallest;
// TODO:
//using AuFutexTicketMutex = AuThreading::Waitables::FutexTicketWaitable;
using AuFutexSemaphore = AuThreading::Waitables::FutexSemaphoreWaitable;
using AuFutexCond = AuThreading::Waitables::FutexCondWaitable;
using AuFutexCondition = AuThreading::Waitables::FutexCondWaitable;
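
For orientation only (this sketch is not part of the commit): typical use of the two new aliases, assuming an umbrella Aurora include pulls in the Au* names, and using only the members shown in the waitable diff below (Lock, Unlock, LockMS). The header path, globals, and function names here are hypothetical.

#include <Aurora/Aurora.hpp> // assumed umbrella include for the Au* aliases; adjust to your project layout

static AuFutexMutexSpecial  gQueueLock; // state word + sleeper counter; Unlock wakes only when someone waits
static AuFutexMutexSmallest gFlagLock;  // single 32-bit word; Unlock always issues a wake

void PushWork()
{
    gQueueLock.Lock();
    // ... mutate shared state ...
    gQueueLock.Unlock();
}

bool TryTouchFlag()
{
    if (!gFlagLock.LockMS(5)) // bounded wait: give up after ~5 ms
    {
        return false;
    }
    // ... short critical section ...
    gFlagLock.Unlock();
    return true;
}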


@@ -153,4 +153,255 @@ namespace Aurora::Threading::Waitables
        AuAUInt32 uAtomicState {};
        AuAUInt32 uAtomicSleeping {};
    };

    // Movable, vtable-free futex mutex (exposed as AuFutexMutexSpecial)
    struct FutexWaitableNoVTblMovable final
    {
        AU_COPY_MOVE_DEF(FutexWaitableNoVTblMovable);

        inline bool TryLockNoSpin()
        {
            return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
        }

        inline bool TryLock()
        {
            static const AuUInt32 kRef { 1 };

            if (TryLockNoSpin())
            {
                return true;
            }

            return TryWaitOnAddressEx((const void *)&this->uAtomicState,
                                      &kRef,
                                      sizeof(kRef),
                                      [&](const void *pTargetAddress,
                                          const void *pCompareAddress,
                                          AuUInt8 uWordSize)
            {
                return this->TryLockNoSpin();
            });
        }

        inline void Unlock()
        {
            // Clear the lock byte, then wake only if a waiter registered itself as sleeping
            AuAtomicClearU8Lock(&this->uAtomicState);

            if (auto uSleeping = AuAtomicLoad(&this->uAtomicSleeping))
            {
                WakeOnAddress((const void *)&this->uAtomicState);
            }
        }

        inline void Lock()
        {
            static const AuUInt32 kRef { 1 };

            if (TryLock())
            {
                return;
            }

            while (!TryLockNoSpin())
            {
                // Register as a sleeper so Unlock knows a wake is required
                AuAtomicAdd(&this->uAtomicSleeping, 1u);
                WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
                AuAtomicSub(&this->uAtomicSleeping, 1u);
            }
        }

        inline bool LockMS(AuUInt64 qwTimeout)
        {
            return LockNS(AuMSToNS<AuUInt64>(qwTimeout));
        }

        inline bool LockAbsMS(AuUInt64 qwTimeout)
        {
            return LockAbsNS(AuMSToNS<AuUInt64>(qwTimeout));
        }

        inline bool LockNS(AuUInt64 qwTimeout)
        {
            static const AuUInt32 kRef { 1 };

            if (TryLockNoSpin())
            {
                return true;
            }

            auto qwEndTime = Time::SteadyClockNS() + qwTimeout;

            if (TryLock())
            {
                return true;
            }

            while (!TryLockNoSpin())
            {
                bool bStatus {};

                AuAtomicAdd(&this->uAtomicSleeping, 1u);
                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
                AuAtomicSub(&this->uAtomicSleeping, 1u);

                if (!bStatus)
                {
                    // Wait timed out; one final acquisition attempt decides the result
                    return TryLockNoSpin();
                }
            }

            return true;
        }

        inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
        {
            static const AuUInt32 kRef { 1 };

            if (TryLock())
            {
                return true;
            }

            while (!TryLockNoSpin())
            {
                bool bStatus {};

                AuAtomicAdd(&this->uAtomicSleeping, 1u);
                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
                AuAtomicSub(&this->uAtomicSleeping, 1u);

                if (!bStatus)
                {
                    return TryLockNoSpin();
                }
            }

            return true;
        }

        AuAUInt32 uAtomicState {};
        AuAUInt32 uAtomicSleeping {};
    };
    // Single-word variant (exposed as AuFutexMutexSmallest); drops the sleeper counter
    struct FutexWaitableNoVTblMovableSmallest final
    {
        AU_COPY_MOVE_DEF(FutexWaitableNoVTblMovableSmallest);

        inline bool TryLockNoSpin()
        {
            return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
        }

        inline bool TryLock()
        {
            static const AuUInt32 kRef { 1 };

            if (TryLockNoSpin())
            {
                return true;
            }

            return TryWaitOnAddressEx((const void *)&this->uAtomicState,
                                      &kRef,
                                      sizeof(kRef),
                                      [&](const void *pTargetAddress,
                                          const void *pCompareAddress,
                                          AuUInt8 uWordSize)
            {
                return this->TryLockNoSpin();
            });
        }

        inline void Unlock()
        {
            // No sleeper counter to consult, so a wake is issued unconditionally
            AuAtomicClearU8Lock(&this->uAtomicState);
            WakeOnAddress((const void *)&this->uAtomicState);
        }

        inline void Lock()
        {
            static const AuUInt32 kRef { 1 };

            if (TryLock())
            {
                return;
            }

            while (!TryLockNoSpin())
            {
                WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
            }
        }

        inline bool LockMS(AuUInt64 qwTimeout)
        {
            return LockNS(AuMSToNS<AuUInt64>(qwTimeout));
        }

        inline bool LockAbsMS(AuUInt64 qwTimeout)
        {
            return LockAbsNS(AuMSToNS<AuUInt64>(qwTimeout));
        }

        inline bool LockNS(AuUInt64 qwTimeout)
        {
            static const AuUInt32 kRef { 1 };

            if (TryLockNoSpin())
            {
                return true;
            }

            auto qwEndTime = Time::SteadyClockNS() + qwTimeout;

            if (TryLock())
            {
                return true;
            }

            while (!TryLockNoSpin())
            {
                bool bStatus {};

                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);

                if (!bStatus)
                {
                    return TryLockNoSpin();
                }
            }

            return true;
        }

        inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
        {
            static const AuUInt32 kRef { 1 };

            if (TryLock())
            {
                return true;
            }

            while (!TryLockNoSpin())
            {
                bool bStatus {};

                bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);

                if (!bStatus)
                {
                    return TryLockNoSpin();
                }
            }

            return true;
        }

        // Note: a U16/U8 reference impl would be pointless - threefold
        // 1) x86_64 chokes on 16bit atomics;
        // 2) 32bit RISC ISAs would prefer 32bit words;
        // 3) sub 32bit words are just going to junk up our alignment & introduce pointless padding
        AuAUInt32 uAtomicState {};
    };
}
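
Read side by side, the two new structs trade footprint for wake precision: FutexWaitableNoVTblMovable keeps a second word, uAtomicSleeping, so Unlock can skip the WakeOnAddress call when no thread has parked itself, whereas FutexWaitableNoVTblMovableSmallest stores only the 32-bit state word and pays for the saved space with a wake on every Unlock. Per the note above, neither shrinks below a 32-bit word, since sub-32-bit atomics buy nothing on the targeted ISAs and would only perturb alignment.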