AuroraRuntime/Include/Aurora/Threading/Waitables/FutexWaitable.hpp
Jamie Reece Wilson d520b0ce42 [*] I'm going to let the reference waitable/WOA/futex primitives be copyable and movable now.
condvars -> cannot matter; you're creating a new object
mutexes -> cannot matter; you're creating a new object. Copy assignment under lock might be required depending on the parent, but either way a copy ctor is not required.
semaphores -> copy state for the timeline-barrier use case. Semaphores emulating condvars and similar logic won't mind the overshoot. Best case, we do want to copy the previous state as the initial count; worst case, your code is fundamentally broken.
2024-05-28 00:42:24 +01:00
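For the futex waitables in this header, the copy semantics above reduce to: copying one hands back a fresh, unlocked primitive; no lock bit or sleeper count carries over. A minimal sketch of what a caller sees (hypothetical code; the include path is assumed from the repository layout):

    #include <Aurora/Threading/Waitables/FutexWaitable.hpp>

    struct Guarded
    {
        Aurora::Threading::Waitables::FutexWaitable lock;
        int iValue {};
    };

    void Example()
    {
        Guarded a;
        a.lock.Lock();       // a holds the lock
        Guarded b = a;       // b.iValue == a.iValue, but b.lock starts out unlocked
        b.lock.Lock();       // acquires immediately; b never saw a's lock bit
        b.lock.Unlock();
        a.lock.Unlock();
    }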


/***
Copyright (C) 2023 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: FutexWaitable.hpp
Date: 2023-08-19
Author: Reece
***/
#pragma once
#include "../WakeOnAddress.hpp"
#include "../SpinTime.hpp"
namespace Aurora::Threading::Waitables
{
struct FutexWaitable final : IWaitable
{
inline constexpr FutexWaitable()
{}
AU_MOVE(FutexWaitable)
inline constexpr FutexWaitable(const FutexWaitable &that)
{
// NOP: a copied waitable starts in a fresh, unlocked state
}
AU_OPERATOR_COPY(FutexWaitable)
inline bool TryLockNoSpin()
{
return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
}
inline bool TryLock() override
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
return TryWaitOnAddressEx((const void *)&this->uAtomicState,
&kRef,
sizeof(kRef),
[&](const void *pTargetAddress,
const void *pCompareAddress,
AuUInt8 uWordSize)
{
return this->TryLockNoSpin();
});
}
inline bool HasOSHandle(AuMach &mach) override
{
return false;
}
inline bool HasLockImplementation() override
{
return true;
}
inline void Unlock() override
{
AuAtomicClearU8Lock(&this->uAtomicState);
if (auto uSleeping = AuAtomicLoad(&this->uAtomicSleeping))
{
WakeOnAddress((const void *)&this->uAtomicState);
}
}
inline void Lock() override
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return;
}
while (!TryLockNoSpin())
{
AuAtomicAdd(&this->uAtomicSleeping, 1u);
WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
}
}
inline bool LockMS(AuUInt64 qwTimeout) override
{
return LockNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockAbsMS(AuUInt64 qwTimeout) override
{
return LockAbsNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockNS(AuUInt64 qwTimeout) override
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
auto qwEndTime = Time::SteadyClockNS() + qwTimeout;
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
AuAtomicAdd(&this->uAtomicSleeping, 1u);
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
inline bool LockAbsNS(AuUInt64 qwTimeoutAbs) override
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
AuAtomicAdd(&this->uAtomicSleeping, 1u);
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
AuAUInt32 uAtomicState {};
AuAUInt32 uAtomicSleeping {};
};
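// A minimal usage sketch: FutexWaitable behaves as a futex-backed mutex behind the
// IWaitable interface, so it can be driven directly or handed to IWaitable-typed code.
// (Illustrative caller code only; nothing below is defined by this header.)
//
//     Aurora::Threading::Waitables::FutexWaitable mutex;
//
//     mutex.Lock();                 // sleeps on uAtomicState until the lock bit is free
//     // ...critical section...
//     mutex.Unlock();               // clears the lock bit and issues a wake if anyone is sleeping
//
//     if (mutex.LockMS(5))          // relative-timeout path; forwards to LockNS
//     {
//         // ...critical section...
//         mutex.Unlock();
//     }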
struct FutexWaitableNoVTblMovable final
{
inline constexpr FutexWaitableNoVTblMovable()
{}
AU_MOVE(FutexWaitableNoVTblMovable)
inline constexpr FutexWaitableNoVTblMovable(const FutexWaitableNoVTblMovable &that)
{
// NOP
}
AU_OPERATOR_COPY(FutexWaitableNoVTblMovable)
inline bool TryLockNoSpin()
{
return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
}
inline bool TryLock()
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
return TryWaitOnAddressEx((const void *)&this->uAtomicState,
&kRef,
sizeof(kRef),
[&](const void *pTargetAddress,
const void *pCompareAddress,
AuUInt8 uWordSize)
{
return this->TryLockNoSpin();
});
}
inline void Unlock()
{
AuAtomicClearU8Lock(&this->uAtomicState);
if (auto uSleeping = AuAtomicLoad(&this->uAtomicSleeping))
{
WakeOnAddress((const void *)&this->uAtomicState);
}
}
inline void Lock()
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return;
}
while (!TryLockNoSpin())
{
AuAtomicAdd(&this->uAtomicSleeping, 1u);
WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
}
}
inline bool LockMS(AuUInt64 qwTimeout)
{
return LockNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockAbsMS(AuUInt64 qwTimeout)
{
return LockAbsNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockNS(AuUInt64 qwTimeout)
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
auto qwEndTime = Time::SteadyClockNS() + qwTimeout;
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
AuAtomicAdd(&this->uAtomicSleeping, 1u);
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
AuAtomicAdd(&this->uAtomicSleeping, 1u);
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
AuAtomicSub(&this->uAtomicSleeping, 1u);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
AuAUInt32 uAtomicState {};
AuAUInt32 uAtomicSleeping {};
};
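// A minimal embedding sketch: this variant has the same lock word and sleeper counter
// as FutexWaitable but no vtable, so it is a plain two-word value type that can live
// inside copyable/movable aggregates. (Illustrative only; Bucket is a hypothetical type.)
//
//     struct Bucket
//     {
//         FutexWaitableNoVTblMovable lock;
//         AuUInt32 uCount {};
//     };
//
//     Bucket bucket;
//     bucket.lock.Lock();
//     bucket.uCount++;
//     bucket.lock.Unlock();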
struct FutexWaitableNoVTblMovableSmallest final
{
//AU_COPY_MOVE_DEF(FutexWaitableNoVTblMovableSmallest);
AU_MOVE(FutexWaitableNoVTblMovableSmallest) AU_DEF(FutexWaitableNoVTblMovableSmallest)
inline FutexWaitableNoVTblMovableSmallest(const FutexWaitableNoVTblMovableSmallest &that)
{
// NOP
}
AU_OPERATOR_COPY(FutexWaitableNoVTblMovableSmallest)
inline bool TryLockNoSpin()
{
return AuAtomicTestAndSet(&this->uAtomicState, 0u) == 0;
}
inline bool TryLock()
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
return TryWaitOnAddressEx((const void *)&this->uAtomicState,
&kRef,
sizeof(kRef),
[&](const void *pTargetAddress,
const void *pCompareAddress,
AuUInt8 uWordSize)
{
return this->TryLockNoSpin();
});
}
inline void Unlock()
{
AuAtomicClearU8Lock(&this->uAtomicState);
WakeOnAddress((const void *)&this->uAtomicState);
}
inline void Lock()
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return;
}
while (!TryLockNoSpin())
{
WaitOnAddress((const void *)&this->uAtomicState, &kRef, sizeof(kRef), 0, true);
}
}
inline bool LockMS(AuUInt64 qwTimeout)
{
return LockNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockAbsMS(AuUInt64 qwTimeout)
{
return LockAbsNS(AuMSToNS<AuUInt64>(qwTimeout));
}
inline bool LockNS(AuUInt64 qwTimeout)
{
static const AuUInt32 kRef { 1 };
if (TryLockNoSpin())
{
return true;
}
auto qwEndTime = Time::SteadyClockNS() + qwTimeout;
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwEndTime, true);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
inline bool LockAbsNS(AuUInt64 qwTimeoutAbs)
{
static const AuUInt32 kRef { 1 };
if (TryLock())
{
return true;
}
while (!TryLockNoSpin())
{
bool bStatus {};
bStatus = WaitOnAddressSteady((const void *)&this->uAtomicState, &kRef, sizeof(kRef), qwTimeoutAbs, true);
if (!bStatus)
{
return TryLockNoSpin();
}
}
return true;
}
// Note: a U16/U8 reference impl would be pointless - threefold:
// 1) x86_64 chokes on 16-bit atomics;
// 2) 32-bit RISC ISAs would prefer 32-bit words;
// 3) sub-32-bit words would just junk up our alignment & introduce pointless padding
AuAUInt32 uAtomicState {};
};
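// A minimal packing sketch: with the sleeper counter gone, every Unlock() issues a wake
// whether or not anyone is waiting, but the whole waitable collapses to a single 32-bit
// word, which makes it a reasonable fit for large, densely packed lock tables.
// (Illustrative only; the table and function below are hypothetical.)
//
//     static FutexWaitableNoVTblMovableSmallest gBucketLocks[256];
//     // assuming the AU_* macros add no data members: one lock per 32-bit word
//     static_assert(sizeof(gBucketLocks[0]) == sizeof(AuAUInt32), "unexpected padding");
//
//     void TouchBucket(AuUInt32 uIndex)
//     {
//         auto &lock = gBucketLocks[uIndex & 0xFF];
//         lock.Lock();
//         // ...mutate the bucket...
//         lock.Unlock();
//     }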
}