AuroraRuntime/Source/IO/Loop/LSLocalSemaphore.cpp
J Reece Wilson 02826d2365 [+] AuLoop::kWaitMultipleFlagNoIOCallbacks
[+] AuLoop::kWaitMultipleFlagBreakAfterAPC
[+] Alternative Wait AND implementations for NT, POSIX, and generic
[+] IOConfig::...
[*] LoopQueue improvements
[+] ILoopQueue::ConfigureDoIOApcCallbacks
[+] ILoopQueue::ConfigureDoIOApcCallbacks
2024-10-10 11:03:26 +01:00

394 lines
10 KiB
C++

/***
Copyright (C) 2023 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: LSLocalSemaphore.cpp
Date: 2023-10-21
Author: Reece
***/
#include <RuntimeInternal.hpp>
#include "LSLocalSemaphore.hpp"
#include <Source/Threading/Primitives/SMTYield.hpp>
namespace Aurora::IO::Loop
{
extern AuRWRenterableLock gWaitForMultipleALLLock;
// Default constructor: all real setup is deferred to TryInit so that
// failure can be reported (constructors cannot return an error here).
LSLocalSemaphore::LSLocalSemaphore()
{
}
// Destructor: nothing to release beyond what the LSSemaphore base tears down.
LSLocalSemaphore::~LSLocalSemaphore()
{
}
// Two-phase initializer: brings up the kernel-backed base semaphore first,
// then seeds both userland mirrors with the same starting count.
//
// @param initialCount initial number of available units
// @param uMaxCount    ceiling for the userland counter (0 = unbounded)
// @return true on success; false if the base semaphore could not be created
bool LSLocalSemaphore::TryInit(AuUInt32 initialCount, AuUInt32 uMaxCount)
{
    bool bBaseOk = LSSemaphore::TryInit(initialCount);

    if (bBaseOk)
    {
        // Userland fast-path counter and its kernel-side mirror start in sync.
        this->uAtomicSemaphore = initialCount;
        this->uAtomicKernelSemaphore = initialCount;
        this->uMaxCount = uMaxCount;
    }

    return bBaseOk;
}
// Resynchronization fallback: if the userland counter still holds units while
// the kernel mirror hit zero, re-arm the kernel side for every pending unit.
void LSLocalSemaphore::DoParanoia()
{
    const auto uPending = AuAtomicLoad(&this->uAtomicSemaphore);

    if (uPending == 0)
    {
        return;
    }

    AuAtomicAdd(&this->uAtomicKernelSemaphore, uPending);
    LSSemaphore::AddMany(uPending);
}
// Kernel handle fired: try to consume one userland unit, then rebalance the
// kernel-side counter so remaining units keep the kernel object armed.
//
// @param handle unused; signature dictated by the loop-source interface
// @return true if a userland unit was consumed by this wake
bool LSLocalSemaphore::OnTrigger(AuUInt handle)
{
    auto bRet = this->TryTakeNoSpin();

    while (true)
    {
        auto uOld = AuAtomicLoad(&this->uAtomicKernelSemaphore);
        if (uOld == 0)
        {
            // Kernel mirror is already drained relative to this wake; resync
            // it from the userland counter rather than underflowing.
            DoParanoia();
            break;
        }

        if (AuAtomicCompareExchange(&this->uAtomicKernelSemaphore, uOld - 1, uOld) == uOld)
        {
            auto uCount = AuAtomicLoad(&this->uAtomicSemaphore);

            if (uOld - 1 == 0)
            {
                // We consumed the last kernel unit: re-arm for whatever the
                // userland counter still holds.
#if defined(AURORA_PLATFORM_LINUX)
                if (uCount == 1)
                {
                    // Don't acknowledge?
                    // Don't write into?
                    // saves two syscalls for nothang
                    AuAtomicAdd(&this->uAtomicKernelSemaphore, 1u);
                }
                else if (uCount)
                {
                    // One unit is implicitly covered by this wake; only
                    // uCount - 1 extra kernel signals are needed.
                    AuAtomicAdd(&this->uAtomicKernelSemaphore, uCount);
                    LSSemaphore::AddMany(uCount - 1);
                }
#else
                if (uCount)
                {
                    AuAtomicAdd(&this->uAtomicKernelSemaphore, uCount);
                    LSSemaphore::AddMany(uCount);
#if !defined(AURORA_IS_MODERNNT_DERIVED)
                    // Non-NT, non-Linux: additionally acknowledge the handle
                    // this wake came in on.
                    (void)LSSemaphore::OnTrigger(0);
#endif
                }
#endif
                // NOTE: this else binds to `else if (uCount)` on Linux and to
                // `if (uCount)` elsewhere — i.e. the "no userland units left"
                // case on either platform: just acknowledge the wake.
                else
                {
                    (void)LSSemaphore::OnTrigger(0);
                }
            }
            else if (uOld || !bRet)
            {
                // NOTE(review): uOld is provably non-zero on this path (the
                // uOld == 0 case broke out above), so this condition always
                // holds — presumably intentional shorthand; confirm before
                // simplifying.
                (void)LSSemaphore::OnTrigger(0);
            }

            break;
        }
    }

    return bRet;
}
// Releases one unit.
//
// @return false only when a max count was configured via TryInit and the
//         userland counter is already at that ceiling; true otherwise
bool LSLocalSemaphore::AddOne()
{
    AuUInt32 uNext {};

    if (auto uMaxValue = this->uMaxCount)
    {
        // Bounded: CAS-increment the userland counter, refusing to cross the
        // configured ceiling.
        while (true)
        {
            auto uCurrentValue = AuAtomicLoad(&this->uAtomicSemaphore);
            uNext = uCurrentValue + 1;

            if (uNext > uMaxValue)
            {
                return false;
            }

            if (AuAtomicCompareExchange(&this->uAtomicSemaphore, uNext, uCurrentValue) == uCurrentValue)
            {
                break;
            }
        }
    }
    else
    {
        // Unbounded: plain atomic increment. For consistency with the bounded
        // path, uNext must be the post-increment value — assumes AuAtomicAdd
        // returns the new value (TODO confirm against the primitive's docs).
        uNext = AuAtomicAdd(&this->uAtomicSemaphore, 1u);
    }

    // Raise the kernel-side mirror to at least uNext. If it is already there,
    // another releaser covers the kernel signal and we can avoid the syscall.
    while (true)
    {
        auto uCurrentValue = AuAtomicLoad(&this->uAtomicKernelSemaphore);
        auto uNextValue = uCurrentValue;
        bool bCanReturn = false;

        if (uCurrentValue < uNext)
        {
            uNextValue = uNext;
        }
        else
        {
            bCanReturn = true;
        }

        if (AuAtomicCompareExchange(&this->uAtomicKernelSemaphore, uNextValue, uCurrentValue) == uCurrentValue)
        {
            if (bCanReturn)
            {
                // Kernel mirror already covered this release: no wake needed.
                return true;
            }
            else
            {
                break;
            }
        }
    }

    // Kernel mirror was behind: wake one waiter via the kernel object.
    return LSSemaphore::AddOne();
}
// Releases uCount units at once.
//
// @param uCount number of units to add
// @return false only when a max count was configured and adding uCount would
//         exceed it (nothing is added in that case); true otherwise
bool LSLocalSemaphore::AddMany(AuUInt32 uCount)
{
    AuUInt32 uNext {};

    if (auto uMaxValue = this->uMaxCount)
    {
        // Bounded: all-or-nothing CAS of the userland counter up by uCount.
        while (true)
        {
            auto uCurrentValue = AuAtomicLoad(&this->uAtomicSemaphore);
            uNext = uCurrentValue + uCount;

            if (uNext > uMaxValue)
            {
                return false;
            }

            if (AuAtomicCompareExchange(&this->uAtomicSemaphore, uNext, uCurrentValue) == uCurrentValue)
            {
                break;
            }
        }
    }
    else
    {
        // Unbounded: uNext is the post-add value — assumes AuAtomicAdd
        // returns the new value, matching what the bounded path produces.
        uNext = AuAtomicAdd(&this->uAtomicSemaphore, uCount);
    }

#if 0
    // Retained (disabled) simpler scheme plus the author's reasoning:
    if (AuAtomicLoad(&this->uAtomicKernelSemaphore) >= uNext)
    {
        return true;
    }
    else
    {
        /* if AddMany add/load/kernel-wake race condition, it's the next AddMany persons problem. */
        /* uAtomicKernelSemaphore cannot be lower than uAtomicSemaphore, at the epilogue of the last unlock/adds tick. */
        /* If it somehow is, ::OnTrigger will check that the final kernel negative increment does not occur just before (linux) after (win32) (bool(this->uAtomicSemaphore)). */
        /* Remember: this->uAtomicKernelSemaphore should only be decremented after uAtomicSemaphore and uAtomicKernelSemaphore have already been incremented together... */
        /* ...therefore, the last kernel waker should always see bool(this->uAtomicSemaphore), unless stolen by another thread. */
        /* if stolen, it's a race condition we dont care about; we avoided the kernel object and state entirely. We have to wait for another AddMany to wake us up. */
        /* if not stolen, uAtomicSemaphore is read as non-zero, and is readded to the kernel semaphore */
        /* Most users just use the IO event objects LSAsync and LSLocalEvent. These are known good. */
    }
    AuAtomicAdd(&this->uAtomicKernelSemaphore, uCount);
#else
    // Live scheme: raise the kernel mirror to at least uNext, then signal the
    // kernel object for only the delta we actually raised it by.
    while (true)
    {
        auto uCurrentValue = AuAtomicLoad(&this->uAtomicKernelSemaphore);
        auto uNextValue = uCurrentValue;
        bool bCanReturn = false;

        if (uCurrentValue < uNext)
        {
            uNextValue = uNext;
        }
        else
        {
            bCanReturn = true;
        }

        if (AuAtomicCompareExchange(&this->uAtomicKernelSemaphore, uNextValue, uCurrentValue) == uCurrentValue)
        {
            if (bCanReturn)
            {
                // Kernel mirror already covers uNext: no syscall required.
                return true;
            }
            else
            {
                // Only the shortfall needs a kernel signal.
                uCount = uNext - uCurrentValue;
                break;
            }
        }
    }
#endif

    return LSSemaphore::AddMany(uCount);
}
// Signaled means a unit could be consumed; note this is a destructive check
// (it actually takes the unit, spin path included).
bool LSLocalSemaphore::IsSignaled()
{
    const bool bAcquired = this->TryTake();
    return bAcquired;
}
// Destructive signaled-check variant that skips the userland spin loop.
bool LSLocalSemaphore::IsSignaledNoSpinIfUserland()
{
    const bool bAcquired = this->TryTakeNoSpin();
    return bAcquired;
}
// Identifies this loop source as the userland fast-path semaphore variant.
ELoopSource LSLocalSemaphore::GetType()
{
    const auto eSourceType = ELoopSource::eSourceFastSemaphore;
    return eSourceType;
}
bool LSLocalSemaphore::TryTakeNoSpin()
{
AuUInt32 uOld {};
while ((uOld = this->uAtomicSemaphore))
{
if (AuAtomicCompareExchange(&this->uAtomicSemaphore, uOld - 1, uOld) == uOld)
{
return true;
}
}
return false;
}
// Take with an adaptive userland spin before giving up.
//
// @return true if a unit was consumed within the spin window
bool LSLocalSemaphore::TryTakeSpin()
{
    // Spin helper retries TryTakeNoSpin while watching the counter address
    // (Alder-Lake-aware backoff lives in the primitive).
    bool bRet = Threading::Primitives::DoTryIfAlderLake([&]
    {
        return this->TryTakeNoSpin();
    }, &this->uAtomicSemaphore);

#if !defined(AU_NO_WAITMULTIPLELS_ALL_MS_PARITY)
    // If NT-parity wait-ALL behavior is requested, retry once more under the
    // global wait-for-multiple read lock — presumably to serialize against an
    // in-flight wait-ALL commit (TODO confirm against the wait-ALL impl).
    if (!bRet && gRuntimeConfig.ioConfig.bAimCloserForNTParityWaitALL)
    {
        AU_LOCK_GLOBAL_GUARD(gWaitForMultipleALLLock->AsReadable());
        bRet = this->TryTakeNoSpin();
    }
#endif

    return bRet;
}
// Public non-blocking take; delegates to the spinning variant.
bool LSLocalSemaphore::TryTake()
{
    const bool bTaken = this->TryTakeSpin();
    return bTaken;
}
// Blocking take with an absolute deadline.
//
// @param uEndTime absolute deadline in nanoseconds on the Time::SteadyClockNS
//                 clock; 0 means wait with no deadline
// @return true if a unit was taken; false on timeout
bool LSLocalSemaphore::TryTakeWaitNS(AuUInt64 uEndTime)
{
    // Fast path: spin in userland first.
    if (this->TryTakeSpin())
    {
        return true;
    }

    while (!this->TryTakeNoSpin())
    {
        if (!uEndTime)
        {
            // No deadline: block on the kernel object.
            // NOTE(review): returns true without decrementing the userland
            // counter here — presumably the kernel wake conveys ownership;
            // confirm against LSSemaphore::WaitOn semantics.
            if (LSSemaphore::WaitOn(0))
            {
                return true;
            }
        }
        else
        {
            auto uStartTime = Time::SteadyClockNS();
            if (uStartTime >= uEndTime)
            {
                // Deadline already passed.
                return false;
            }

            // Kernel waits are millisecond-granular; compute remaining ms.
            auto uDeltaMs = AuNSToMS<AuInt64>(uEndTime - uStartTime);
            if (uDeltaMs &&
                LSSemaphore::WaitOn(uDeltaMs))
            {
                return true;
            }
            else if (!uDeltaMs)
            {
                // Sub-millisecond remainder: spin in userland instead of
                // sleeping; the deadline check above ends the loop.
                if (this->TryTakeSpin())
                {
                    return true;
                }
            }
        }
    }

    return true;
}
// Loop-source pre-sleep hook: intentionally a no-op — no state needs staging
// before the owning loop parks on the kernel handle.
void LSLocalSemaphore::OnPresleep()
{
}
// Loop-source post-sleep hook: intentionally a no-op — rebalancing is handled
// by OnTrigger when the handle actually fires.
void LSLocalSemaphore::OnFinishSleep()
{
}
// Factory: creates an unbounded fast loop-source semaphore.
//
// @param uInitialCount initial number of available units
// @return the new semaphore, or an empty pointer on failure (error pushed)
AUKN_SYM AuSPtr<ILSSemaphore> NewLSSemaphore(AuUInt32 uInitialCount)
{
#if defined(AURORA_IS_MODERNNT_DERIVED)
    // NT: defer to the pure-kernel implementation when the runtime config
    // demands exact wait-ALL ("perfect AND") semantics.
    if (gRuntimeConfig.ioConfig.bINeedPerfectAnds)
    {
        return NewLSSemaphoreSlow(uInitialCount);
    }
#endif

    auto pSemaphore = AuMakeShared<LSLocalSemaphore>();
    if (!pSemaphore)
    {
        SysPushErrorGeneric();
        return {};
    }

    if (!pSemaphore->TryInit(uInitialCount))
    {
        SysPushErrorNested();
        return {};
    }

    return pSemaphore;
}
// Factory: creates a fast loop-source semaphore with a maximum count.
//
// @param uInitialCount initial number of available units
// @param uMaxCount     ceiling enforced by AddOne/AddMany (0 = unbounded)
// @return the new semaphore, or an empty pointer on failure (error pushed)
AUKN_SYM AuSPtr<ILSSemaphore> NewLSSemaphoreEx(AuUInt32 uInitialCount, AuUInt32 uMaxCount)
{
    auto pSemaphore = AuMakeShared<LSLocalSemaphore>();
    if (!pSemaphore)
    {
        SysPushErrorGeneric();
        return {};
    }

    if (!pSemaphore->TryInit(uInitialCount, uMaxCount))
    {
        SysPushErrorNested();
        return {};
    }

    return pSemaphore;
}
}