AuroraRuntime/Source/Threading/Primitives/AuSemaphore.Linux.cpp
Jamie Reece Wilson a454a2d71e [*] Sync primitive improvements
[*] Reverted a change for UNIX: always use a never-spin acquire under the observational lock
[*] Decreased the common-case number of syscalls under Linux and UNIX
[*] Unix signaling: prevent waits during condvar wake-up by unlocking before the signal
[*] NT no wait: semaphores must not spin under lock
2023-06-26 08:59:49 +01:00
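
Note on the third bullet: "unlocking before the signal" refers to the pattern of releasing the mutex before notifying a condition variable, so a woken waiter never immediately blocks on a lock the signaller still holds. A minimal illustrative sketch in standard C++ follows; the names (SignalOne, WaitOne, gCounter) are hypothetical and are not AuroraRuntime APIs or the runtime's condition-variable code.

#include <condition_variable>
#include <mutex>

static std::mutex gMutex;
static std::condition_variable gCondition;
static unsigned gCounter = 0;

void SignalOne()
{
    {
        std::unique_lock<std::mutex> lock(gMutex);
        gCounter++;
    }                        // drop the lock first...
    gCondition.notify_one(); // ...then wake, so the waiter does not stall on gMutex

void WaitOne()
{
    std::unique_lock<std::mutex> lock(gMutex);
    gCondition.wait(lock, [] { return gCounter != 0; });
    gCounter--;
}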


/***
    Copyright (C) 2022 J Reece Wilson (a/k/a "Reece"). All rights reserved.

    File: AuSemaphore.Linux.cpp
    Date: 2022-12-28
    Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "AuSemaphore.Generic.hpp"
#include "SMTYield.hpp"
#include <sys/syscall.h>
#include <linux/futex.h>
#if !defined(_AURUNTIME_GENERIC_SEMAPHORE)
#include <Source/Time/Time.hpp>
namespace Aurora::Threading::Primitives
{
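    // Thin wrapper over the raw futex(2) system call; glibc does not export a
    // futex() function, so the six-argument syscall form is used directly.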
    static int futex(uint32_t *uaddr, int futex_op, uint32_t val,
                     const struct timespec *timeout,
                     uint32_t *uaddr2, uint32_t val3)
    {
        return syscall(SYS_futex, uaddr, futex_op, val, timeout, uaddr2, val3);
    }
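
    // FUTEX_WAIT blocks only while *addr still equals the expected value; a
    // mismatch returns immediately with EAGAIN. The timed overload below uses
    // FUTEX_WAIT_BITSET because it takes an absolute timeout (measured against
    // CLOCK_MONOTONIC by default), matching the absolute deadline computed in
    // LockNS.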
    static int futex_wait(uint32_t *addr, uint32_t expected)
    {
        return futex(addr, FUTEX_WAIT, expected, nullptr, nullptr, 0);
    }

    static int futex_wait(uint32_t *addr, uint32_t expected, const struct timespec *timeout)
    {
        if (timeout)
        {
            return futex(addr, FUTEX_WAIT_BITSET, expected, timeout, nullptr, FUTEX_BITSET_MATCH_ANY);
        }
        else
        {
            return futex(addr, FUTEX_WAIT, expected, timeout, nullptr, 0);
        }
    }
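
    // Wakes up to nthreads waiters currently blocked on addr.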
    static int futex_wake(uint32_t *addr, uint32_t nthreads)
    {
        return futex(addr, FUTEX_WAKE, nthreads, nullptr, nullptr, 0);
    }

    SemaphoreImpl::SemaphoreImpl(long initialValue) : value_(initialValue)
    {
    }

    SemaphoreImpl::~SemaphoreImpl()
    {
    }

    bool SemaphoreImpl::HasOSHandle(AuMach &mach)
    {
        return false;
    }

    bool SemaphoreImpl::HasLockImplementation()
    {
        return true;
    }
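
    // Lock-free fast path: attempt to decrement a non-zero count via CAS.
    // DoTryIf (from SMTYield.hpp) is presumed to retry the predicate briefly
    // under contention before giving up.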
    bool SemaphoreImpl::TryLock()
    {
        return DoTryIf([=]()
        {
            auto old = this->value_;
            return (old != 0 && AuAtomicCompareExchange(&this->value_, old - 1, old) == old);
        });
    }

    bool SemaphoreImpl::LockMS(AuUInt64 uTimeout)
    {
        return LockNS(AuMSToNS<AuUInt64>(uTimeout));
    }
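
    // Slow path: try the CAS first, then sleep on the futex while the count is
    // zero. uTimeout is a relative wait in nanoseconds; zero means wait forever.
    // Timed waits are converted to an absolute deadline on the steady
    // (monotonic) clock so retries after EINTR do not extend the wait.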
    bool SemaphoreImpl::LockNS(AuUInt64 uTimeout)
    {
        AuUInt64 uStart {};
        AuUInt64 uEnd {};

        if (this->TryLock())
        {
            return true;
        }

        errno = 0;

        struct timespec tspec;
        if (uTimeout != 0)
        {
            uStart = AuTime::SteadyClockNS();
            uEnd = uStart + uTimeout;
            Time::auabsns2ts(&tspec, uEnd);
        }

        auto old = this->value_;

        // !TryLock, with old kept in a scope we can access
        while (!((old != 0) &&
                 (AuAtomicCompareExchange(&this->value_, old - 1, old) == old)))
        {
            if (uTimeout != 0)
            {
                if (Time::SteadyClockNS() >= uEnd)
                {
                    return false;
                }

                int ret {};
                do
                {
                    ret = futex_wait(&this->value_, 0, &tspec);
                }
                while (ret == -1 && errno == EINTR); // retry; the deadline is absolute
            }
            else
            {
                int ret {};
                bool bStatus {};

                do
                {
                    if ((ret = futex_wait(&this->value_, 0)) == 0)
                    {
                        bStatus = true;
                        continue;
                    }

                    if (errno == EAGAIN) // the count changed before we slept
                    {
                        bStatus = true;
                        continue;
                    }
                }
                while (ret == -1 && errno == EINTR);

                RUNTIME_ASSERT_SHUTDOWN_SAFE(bStatus, "semaphore wait failed: {}", errno)
            }

            old = this->value_;
        }

        return true;
    }
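
    // Infinite acquire: a zero timeout to LockNS means block until signalled.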
    void SemaphoreImpl::Lock()
    {
        auto status = LockNS(0);
        SysAssert(status, "Couldn't lock semaphore");
    }
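
    // Release: publish the new count first, then wake up to `count` sleepers;
    // adding before the wake lets a woken waiter observe the incremented count.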
    void SemaphoreImpl::Unlock(long count)
    {
        AuAtomicAdd<AuUInt32>(&this->value_, count);
        futex_wake(&this->value_, count);
    }

    void SemaphoreImpl::Unlock()
    {
        Unlock(1);
    }

    AUKN_SYM ISemaphore *SemaphoreNew(int iInitialCount)
    {
        return _new SemaphoreImpl(iInitialCount);
    }

    AUKN_SYM void SemaphoreRelease(ISemaphore *pSemaphore)
    {
        AuSafeDelete<SemaphoreImpl *>(pSemaphore);
    }

    AUROXTL_INTERFACE_SOO_SRC_EX(AURORA_SYMBOL_EXPORT, Semaphore, SemaphoreImpl)
}

#endif