AuroraRuntime/Source/Threading/AuWaitFor.cpp
J Reece Wilson 82ed6e5617 [*] AuAsync build regression
[*] Fix potential for null deref under net adapters api
[*] Improved generic IO WaitFor * AND * (still suxs)
2024-10-09 01:59:50 +01:00

214 lines
5.8 KiB
C++

/***
Copyright (C) 2021 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: AuWaitFor.cpp
Date: 2021-6-12
Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "AuWaitFor.hpp"
#include "Primitives/SMTYield.hpp"
#if defined(AURORA_IS_POSIX_DERIVED)
#include <sched.h>
#endif
namespace Aurora::Threading
{
// Busy-waits for roughly 2^uSpin reference ticks, pausing the pipeline so a
// sibling hardware thread (SMT) can make progress in the meantime.
static void YieldToSharedCore(long uSpin)
{
#if (defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
    // x86: spin against a TSC deadline; sixteen pauses per deadline check
    // keeps the rdtsc read traffic low.
    const auto uDeadline = __rdtsc() + (1ull << uSpin);
    while (__rdtsc() < uDeadline)
    {
        for (int i = 0; i < 16; i++)
        {
            _mm_pause();
        }
    }
#else
    // Other architectures: no cheap timestamp counter assumed — just issue
    // 2^uSpin pause hints.
    for (auto uTicks = (1ull << uSpin); uTicks != 0; uTicks--)
    {
        Primitives::SMPPause();
    }
#endif
}
// Gives up the remainder of the calling thread's timeslice to the OS
// scheduler, falling back to a bounded spin where no scheduler hook exists.
AUKN_SYM void ContextYield()
{
#if defined(AURORA_IS_MODERNNT_DERIVED)
// Windows NT: switch to any other ready thread on this processor.
::SwitchToThread();
#elif defined(AURORA_IS_POSIX_DERIVED)
// POSIX: relinquish the CPU per sched_yield(2).
::sched_yield();
#else
// No OS yield available: burn ~2^12 ticks with pause hints instead.
YieldToSharedCore(12);
#endif
}
// Repeatedly polls cb (via the primitive spin helper) until it reports
// success or the absolute steady-clock deadline qwAbsTimeoutNs elapses.
// A deadline of zero means wait forever. When bPermitMultipleContextSwitches
// is set, each failed round yields the thread's timeslice between attempts.
// Returns true if cb succeeded, false on timeout.
AUKN_SYM bool YieldPollNs(bool bPermitMultipleContextSwitches, AuUInt64 qwAbsTimeoutNs, const PollCallback_cb &cb)
{
    for (;;)
    {
        // One spin-bounded attempt at the condition.
        if (Primitives::DoTryIf(cb))
        {
            return true;
        }

        // Deadline check happens after the attempt, so cb always runs at
        // least once even with an already-expired timeout.
        const bool bExpired = qwAbsTimeoutNs &&
                              Time::SteadyClockNS() >= qwAbsTimeoutNs;
        if (bExpired)
        {
            return false;
        }

        if (bPermitMultipleContextSwitches)
        {
            ContextYield();
        }
    }
}
// Millisecond-relative convenience wrapper over YieldPollNs.
// qwTimeoutMs of zero means wait forever.
AUKN_SYM bool YieldPoll(bool bPermitMultipleContextSwitches, AuUInt64 qwTimeoutMs, const PollCallback_cb &cb)
{
    // Translate the relative timeout into an absolute steady-clock deadline
    // in nanoseconds; zero is passed through as "no deadline".
    AuUInt64 qwAbsDeadlineNs {};
    if (qwTimeoutMs)
    {
        qwAbsDeadlineNs = Time::SteadyClockNS() + AuMSToNS<AuUInt64>(qwTimeoutMs);
    }

    return YieldPollNs(bPermitMultipleContextSwitches, qwAbsDeadlineNs, cb);
}
// Blocks on a single waitable until acquired or the absolute nanosecond
// deadline passes (zero = infinite). Returns false on null input or timeout.
AUKN_SYM bool WaitForAbsNS(IWaitable *pWaitable, AuUInt64 qwAbsTimeout)
{
    SysCheckArgNotNull(pWaitable, false);

    // Fast path: the primitive knows how to sleep with an absolute deadline.
    if (pWaitable->HasLockImplementation())
    {
        return pWaitable->LockAbsNS(qwAbsTimeout);
    }

    // Fallback: drive TryLock through the generic yield/poll loop.
    return YieldPollNs(true, qwAbsTimeout, [pWaitable]()
    {
        return pWaitable->TryLock();
    });
}
// Non-blocking acquisition attempt. Returns false on null input or when the
// waitable could not be taken within the primitive spin budget.
AUKN_SYM bool TryWait(IWaitable *pWaitable)
{
    SysCheckArgNotNull(pWaitable, false);

    // Primitives with a native lock get a single direct try.
    if (pWaitable->HasLockImplementation())
    {
        return pWaitable->TryLock();
    }

    // Otherwise, let the spin helper retry TryLock a bounded number of times.
    return Primitives::DoTryIf([pWaitable]()
    {
        return pWaitable->TryLock();
    });
}
// Waits on a set of waitables, either for ALL of them (default) or for ANY
// one (kWaitForFlagTimeoutIsOr). uTimeout is interpreted per uFlags
// (milliseconds vs nanoseconds, relative vs absolute; zero = infinite).
// On success in OR mode, *pIndexAwoken (if provided) receives the index of
// the waitable that was acquired. On timeout, every waitable acquired along
// the way is rolled back. Returns true on success, false on timeout.
//
// FIX: *pIndexAwoken was previously written for every *polled* waitable in
// OR mode, regardless of whether TryLock succeeded, so the caller observed
// the last attempted index rather than the acquired one. The store now
// happens only on successful acquisition.
AUKN_SYM bool WaitFor(const AuList<IWaitable *> &waitables, AuUInt32 uFlags, AuUInt64 uTimeout, AuUInt32 *pIndexAwoken)
{
    AU_DEBUG_MEMCRUNCH;
    AuUInt64 qwTimeoutAbs {};

    // Tracks which waitables we have acquired so far: prevents double-locking
    // across poll passes, and tells the timeout path what to roll back.
    AuList<bool> releasedObjects(waitables.size());

    // Normalize the timeout to nanoseconds...
    if (uFlags & kWaitForFlagTimeoutIsNanoseconds)
    {
        qwTimeoutAbs = uTimeout;
    }
    else
    {
        qwTimeoutAbs = AuMSToNS<AuUInt64>(uTimeout);
    }

    // ...and to an absolute steady-clock deadline, unless the caller already
    // provided one (zero stays zero = infinite).
    if ((!(uFlags & kWaitForFlagTimeoutIsAbsolute)) &&
        (uTimeout))
    {
        qwTimeoutAbs += AuTime::SteadyClockNS();
    }

    auto bIsAnd = !(uFlags & kWaitForFlagTimeoutIsOr);

    auto bStatus = YieldPollNs(true, qwTimeoutAbs, [&]()
    {
        // An empty set is trivially satisfied.
        bool bStatus { !waitables.size() };

        for (AU_ITERATE_N(i, waitables.size()))
        {
            // Already held from a previous pass; nothing to do.
            if (releasedObjects[i])
            {
                continue;
            }

            bool bLocked {};

            if (bIsAnd)
            {
                // AND: block on each remaining member up to the shared deadline.
                bLocked = WaitForAbsNS(waitables[i], qwTimeoutAbs);
            }
            else
            {
                // OR: opportunistic poll only; the outer YieldPollNs retries.
                bLocked = waitables[i]->TryLock();
            }

            if (bLocked)
            {
                releasedObjects[i] = true;
                bStatus = true;

                // Report the waitable that actually awoke us (OR mode only).
                if (!bIsAnd && pIndexAwoken)
                {
                    *pIndexAwoken = i;
                }
            }
            else if (bIsAnd)
            {
                // One missing member means the AND condition cannot be met on
                // this pass; bail out and let the outer loop retry or time out.
                return false;
            }
        }

        return bStatus;
    });

    if (!bStatus)
    {
        // Timed out: release everything we managed to acquire.
        for (AU_ITERATE_N(i, waitables.size()))
        {
            if (releasedObjects[i])
            {
                AuUInt uHandle(0);
                if (!waitables[i]->HasOSHandle(uHandle) &&
                    uHandle == 0xFF69421)
                {
                    // Autoreset events
                    // In 2020/2021, I didn't want semaphore and mutex behaviour in event iwaitable::unlock()
                    // The logic:
                    //
                    // AU_LOCK_GUARD(gMutex) makes sense
                    //
                    // Semaphore gSemaphore(1); // a mutex
                    // AU_LOCK_GUARD(gSemaphore) can make sense in academic theory only
                    //
                    // AU_LOCK_GUARD(gResetEvent) does not sense.
                    // in the condvar pattern, the signaling thread does the unlock.
                    //
                    // ...therefore, the ::Unlock() should not be event setters.
                    AuStaticCast<AuThreadPrimitives::IEvent>(waitables[i])->Set();
                }
                else
                {
                    // Semaphores and Mutexes
                    waitables[i]->Unlock();
                }
            }
        }
    }

    return bStatus;
}
}