/***
Copyright (C) 2021 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: AuMutex.NT.cpp
Date: 2021-6-12
Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "AuMutex.Generic.hpp"
#include "SMTYield.hpp"
#include "../AuWakeInternal.hpp"
#if !defined(_AURUNTIME_GENERICMUTEX)

#include "AuMutex.NT.hpp"
#include "AuConditionMutex.NT.hpp"
#include <Time/Time.hpp>

#if !defined(NTSTATUS_TIMEOUT)
    #define NTSTATUS_TIMEOUT 0x102
#endif
namespace Aurora::Threading::Primitives
{
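    // Lock state encoding, as used throughout this file (the kFutex* constants are
    // defined elsewhere in the runtime, not shown here):
    //   bit 0                   - lock bit, claimed via AuAtomicTestAndSet in TryLockNoSpin
    //   kFutexBitWait multiples - number of registered sleepers
    //   kFutexBitWake           - wake hand-off token (keyed-event path), consumed
    //                             by the woken thread via AuAtomicSub
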
    MutexImpl::MutexImpl()
    {
        if (!pWaitOnAddress)
        {
            #if defined(AURORA_FORCE_SRW_LOCKS)
            ::InitializeSRWLock(&this->atomicHolder_);
            ::InitializeConditionVariable(&this->wakeup_);
            #endif
        }

        this->state_ = 0;
    }
    MutexImpl::~MutexImpl()
    {
    }

    bool MutexImpl::HasOSHandle(AuMach &mach)
    {
        return false;
    }
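
    // DoTryIf - presumably the adaptive spin helper from SMTYield.hpp - retries the
    // lambda under the runtime's SMT-aware spin policy, making this a bounded
    // spin-then-fail acquire rather than a single attempt.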
    bool MutexImpl::TryLockHeavy()
    {
        return DoTryIf([=]()
        {
            return this->TryLockNoSpin();
        });
    }
    bool MutexImpl::TryLock()
    {
        if (gRuntimeConfig.threadingConfig.bPreferNtMutexSpinTryLock)
        {
            return TryLockHeavy();
        }
        else
        {
            return TryLockNoSpin();
        }
    }
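
    // Single attempt: atomically test-and-set bit 0 of the state word; a prior
    // value of zero means the lock was free and is now owned by the caller.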
    bool MutexImpl::TryLockNoSpin()
    {
        return AuAtomicTestAndSet(&this->state_, 0) == 0;
    }

    bool MutexImpl::HasLockImplementation()
    {
        return true;
    }
    void MutexImpl::SlowLock()
    {
        auto status = LockNS(0);
        SysAssert(status, "Couldn't lock Mutex object");
    }
    bool MutexImpl::LockMS(AuUInt64 uTimeout)
    {
        if (this->TryLockNoSpin())
        {
            return true;
        }

        return this->LockNS(AuMSToNS<AuUInt64>(uTimeout));
    }
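
    // uTimeout is in nanoseconds; 0 means block indefinitely (SlowLock depends on
    // this, asserting that LockNS(0) cannot fail).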
    bool MutexImpl::LockNS(AuUInt64 uTimeout)
    {
        bool returnValue = false;

        if (this->TryLockHeavy())
        {
            return true;
        }

        AuUInt64 uEndTime = uTimeout ? Time::SteadyClockNS() + uTimeout : 0;
        int iYieldCounter {};
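
        // Three sleep strategies follow: WaitOnAddress-based waiting (futex-style,
        // available on Windows 8+) when gUseNativeWaitMutex is set; otherwise the
        // SRW/condition-variable fallback (AURORA_FORCE_SRW_LOCKS builds) or the
        // legacy keyed-event rendezvous.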
        if (gUseNativeWaitMutex)
        {
            while (!this->TryLockNoSpin())
            {
                auto &uValueRef = this->state_;
                auto uValue = uValueRef | 1;
                auto uNextValue = uValue + kFutexBitWait;

                if (AuAtomicCompareExchange(&uValueRef, uNextValue, uValue) == uValue)
                {
                    if (!InternalLTSWaitOnAddressHighRes((void *)&uValueRef, &uNextValue, sizeof(uNextValue), uEndTime))
                    {
                        return false;
                    }
                }
            }

            return true;
        }
        else
        {
            #if defined(AURORA_FORCE_SRW_LOCKS)
            ::AcquireSRWLockShared(&this->atomicHolder_);

            BOOL status = false;
            while (!this->TryLockNoSpin())
            {
                AuUInt32 uTimeoutMS = INFINITE;

                if (uTimeout != 0)
                {
                    auto uStartTime = Time::SteadyClockNS();
                    if (uStartTime >= uEndTime)
                    {
                        goto exitWin32;
                    }

                    uTimeoutMS = AuNSToMS<AuInt64>(uEndTime - uStartTime);
                }

                if (!uTimeoutMS)
                {
                    ::ReleaseSRWLockShared(&this->atomicHolder_);
                    SMPPause();
                    AuThreading::ContextYield();
                    ::AcquireSRWLockShared(&this->atomicHolder_);
                }
                else
                {
                    (void)SleepConditionVariableSRW(&this->wakeup_, &this->atomicHolder_, uTimeoutMS, CONDITION_VARIABLE_LOCKMODE_SHARED);
                }
            }

            returnValue = true;

        exitWin32:
            ::ReleaseSRWLockShared(&this->atomicHolder_);
            #else
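            // Keyed-event path (pre-WaitOnAddress Windows): NtWaitForKeyedEvent blocks
            // until a matching NtReleaseKeyedEvent is issued on the same key. The pair
            // is a strict rendezvous: a release with no waiter blocks the releaser, so
            // sleepers are counted in kFutexBitWait increments and every wake hands
            // over one kFutexBitWake token, consumed via AuAtomicSub after waking.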
            if (!uTimeout)
            {
                while (!this->TryLockNoSpin())
                {
                    auto &uValueRef = this->state_;
                    auto uValue = uValueRef | 1;

                    if (AuAtomicCompareExchange(&uValueRef, uValue + kFutexBitWait, uValue) == uValue)
                    {
                        pNtWaitForKeyedEvent(gKeyedEventHandle, (void *)&uValueRef, 0, NULL);
                        AuAtomicSub(&uValueRef, kFutexBitWake);
                    }
                }

                return true;
            }
            else
            {
                auto &uValueRef = this->state_;
                returnValue = true;

                auto uEndTimeSteady = AuTime::SteadyClockNS() + uTimeout;
                auto uEndTimeWall   = AuTime::CurrentClockNS() + uTimeout;

                bool bFailed {};
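
                // NT kernel absolute deadlines are wall-clock based, hence the dual
                // deadlines: the steady clock decides when to give up, while the wall
                // clock is what NtWaitForKeyedEvent is armed with. On NTSTATUS_TIMEOUT,
                // the waiter must deregister its kFutexBitWait contribution; if that is
                // no longer possible, an unlocker has already consumed the count and is
                // committed to NtReleaseKeyedEvent, so the loop re-waits (bFailed) to
                // absorb the pending wake rather than leave the releaser blocked.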
                while (bFailed || (!this->TryLockNoSpin()))
                {
                    auto uValue = uValueRef | 1;

                    if (!bFailed &&
                        AuTime::SteadyClockNS() >= uEndTimeSteady)
                    {
                        returnValue = this->TryLock();
                        break;
                    }

                    if (bFailed || AuAtomicCompareExchange(&uValueRef, uValue + kFutexBitWait, uValue) == uValue)
                    {
                        auto uTargetTimeNt = AuTime::ConvertTimestampNs(uEndTimeWall);
                        LARGE_INTEGER word;
                        word.QuadPart = uTargetTimeNt;

                        auto uStatus = pNtWaitForKeyedEvent(gKeyedEventHandle, (void *)&this->state_, 0, &word);
                        if (uStatus == NTSTATUS_TIMEOUT)
                        {
                            auto uWWaiters = this->state_ & ~kFutexBitWake;
                            if (uWWaiters >= kFutexBitWait && AuAtomicCompareExchange(&this->state_, uWWaiters - kFutexBitWait, uWWaiters) == uWWaiters)
                            {
                                continue;
                            }
                            else
                            {
                                bFailed = true;
                                continue;
                            }
                        }
                        else
                        {
                            AuAtomicSub(&uValueRef, kFutexBitWake);
                            SysAssertDbg(uStatus == 0);
                        }
                    }

                    bFailed = false;
                }
            }
            #endif

            return returnValue;
        }
    }
    void MutexImpl::Unlock()
    {
        #if defined(AURORA_FORCE_SRW_LOCKS)
        if (gUseNativeWaitMutex)
        {
            auto &uValueRef = this->state_;
            *(AuUInt8 *)&uValueRef = 0;

            while (true)
            {
                auto uValue = uValueRef;
                if (uValue < kFutexBitWait)
                {
                    return;
                }

                if (AuAtomicCompareExchange(&uValueRef, uValue - kFutexBitWait, uValue) == uValue)
                {
                    pWakeByAddressSingle((void *)&this->state_);
                    return;
                }

                SMPPause();
            }

            return;
        }

        ::AcquireSRWLockExclusive(&this->atomicHolder_);
        this->state_ = 0;
        ::ReleaseSRWLockExclusive(&this->atomicHolder_);
        ::WakeAllConditionVariable(&this->wakeup_);
        #else
        auto &uValueRef = this->state_;

        //#if defined(AURORA_COMPILER_MSVC)
        //    #if defined(AURORA_ARCH_X86) || defined(AURORA_ARCH_X64)
        //    // Intel 64 and IA-32 Architectures Software Developer's Manual, Volume 3A, Section 8.2.3.1
        //    *(AuUInt8 *)&uValueRef = 0;
        //
        //    // From this point onwards, our thread could be subject to StoreLoad re-ordering
        //    // ...but it should not matter.
        //
        //    // Given the memory model of x86[-64], we can only really expect to be out of order during an unfenced load operation, which in this class can only be expected under this function, before the CAS.
        //    // No other place reads.
        //
        //    // Re-ordering race condition 1: one thread wins an atomic bit set that we don't catch until the CAS, resulting in: a slow implicit fence under the CAS, an mm_pause stall, a compare, and a return.
        //    //                               alt: uValueRef reads zero, resulting in a preemptive return while no threads need to be awoken.
        //    // Re-ordering race condition 2: we unlock, multiple threads enter ::Lock(), we somehow read `uValue = uValueRef` as zero, and then the first atomic bit-test-and-set winner thread signals the keyed mutex.
        //    // I fail to see how:
        //    //     *byte = 0;   |                           |
        //    //                  | interlocked atomicbitset  | interlocked atomicbitset fail
        //    //                  | [logic]                   | interlocked atomic set kFutexBitWait
        //    //                  | *byte = 0;                | yield
        //    //                  | auto uValue =[acquire]= uValueRef
        //    // ...would result in the second thread missing the third thread's atomic set of kFutexBitWait (cst (?) on account of 8.2.3.1, 8.2.3.8, etc).
        //
        //    // Also note: mfence is far too expensive, and the _ReadWriteBarrier() intrinsics do absolutely nothing.
        //    _ReadWriteBarrier();
        //    #else
        //    InterlockedAndRelease((volatile LONG *)&uValueRef, ~0xFF);
        //    #endif
        //#else
        //    __sync_lock_release((AuUInt8 *)&uValueRef); // __atomic_store_explicit((AuUInt8 *)&uValueRef, 0, __ATOMIC_RELEASE)
        //#endif
        // merged with ROXTL
        AuAtomicClearU8Lock(&uValueRef);
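
        // Lock byte released above (AuAtomicClearU8Lock replaces the per-compiler
        // release stores retired in the comment block); now wake at most one sleeper
        // if a waiter is registered and nobody has re-taken the lock in the meantime.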
        while (true)
        {
            auto uValue = uValueRef;
            if (uValue < kFutexBitWait)
            {
                return;
            }

            // StoreLoad race conditions here cannot result in a return:
            // we should see StoreLoads of at least our *pByte = 0,
            // or we should at least see the CST of kFutexBitWait being applied.
            if (uValue & 1)
            {
                return;
            }

            if (gUseNativeWaitMutex)
            {
                if (AuAtomicCompareExchange(&uValueRef, uValue - kFutexBitWait, uValue) == uValue)
                {
                    pWakeByAddressSingle((void *)&this->state_);
                    return;
                }
            }
            else
            {
                if (uValue & kFutexBitWake)
                {
                    // StoreLoad paranoia
                    if (AuAtomicCompareExchange(&uValueRef, uValue, uValue) == uValue)
                    {
                        return;
                    }
                    else
                    {
                        SMPPause();
                        continue;
                    }
                }

                if (AuAtomicCompareExchange(&uValueRef, uValue - kFutexBitWait + kFutexBitWake, uValue) == uValue)
                {
                    pNtReleaseKeyedEvent(gKeyedEventHandle, (void *)&uValueRef, 0, NULL);
                    return;
                }
            }

            SMPPause();
        }
        #endif
    }
    AUKN_SYM IHyperWaitable *MutexNew()
    {
        return _new MutexImpl();
    }

    AUKN_SYM void MutexRelease(IHyperWaitable *pMutex)
    {
        AuSafeDelete<MutexImpl *>(pMutex);
    }
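
    // Illustrative usage sketch. Assumption: the interface returned by MutexNew()
    // exposes the LockMS()/Unlock() methods implemented above; the authoritative
    // signatures live in the public Aurora headers, not in this file.
    //
    //     using namespace Aurora::Threading::Primitives;
    //
    //     auto pMutex = MutexNew();
    //     if (pMutex)
    //     {
    //         if (pMutex->LockMS(5)) // bounded acquire: give up after 5 ms
    //         {
    //             // ...critical section...
    //             pMutex->Unlock();
    //         }
    //         MutexRelease(pMutex);
    //     }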
    AUROXTL_INTERFACE_SOO_SRC_EX(AURORA_SYMBOL_EXPORT, Mutex, MutexImpl)
}
#endif