AuroraRuntime/Source/Threading/Primitives/SMTYield.hpp
2023-08-28 11:48:13 +01:00

168 lines
4.2 KiB
C++

/***
Copyright (C) 2023 J Reece Wilson (a/k/a "Reece"). All rights reserved.
File: SMTYield.hpp
Date: 2023-3-12
Author: Reece
***/
#pragma once
namespace Aurora::Threading
{
// Non-zero enables an extra per-thread spin phase in the yield loops below
// (see YieldToSharedCore: it spins for (1 << tlsSpinCountLocal) iterations
// when this flag is set).
inline AuUInt32 gHasThreadLocalTimeout {};
// Per-thread exponent controlling the length of that extra spin phase.
inline thread_local AuUInt8 tlsSpinCountLocal {};
}
namespace Aurora::Threading::Primitives
{
// When non-zero, YieldToSharedCore uses the adaptive spin path: threads only
// spin while the number of concurrently spinning threads is at or below this
// threshold.
inline AuUInt32 gSpinAdaptiveThreshold {};
// Count of threads currently inside an adaptive spin loop; maintained with
// AuAtomicAdd/AuAtomicSub in YieldToSharedCore.
inline AuUInt32 gSpinAdaptiveCurrentCount {};
// NOTE(review): not referenced in this header's visible code — presumably
// consulted by the RW lock implementation elsewhere; confirm before relying
// on its semantics.
inline AuUInt32 gUseFutexRWLock {};
// Defined elsewhere: (re)compute gSpinAdaptiveThreshold from platform info.
void InitAdaptiveThreshold();
void InitAdaptiveThresholdFirstTime();
// Hint to the CPU that this hardware thread is in a spin-wait loop, letting a
// sibling SMT/hyper-thread make progress and reducing power draw. Compiles to
// a single pause/yield instruction where available.
static auline void SMPPause()
{
#if (defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
// x86/x64: PAUSE instruction.
_mm_pause();
#elif defined(AURORA_ARCH_ARM)
#if defined(AURORA_COMPILER_GCC)
// GCC-compatible ARM: emit YIELD via inline assembly.
asm volatile("yield");
#else
// Other ARM compilers (e.g. MSVC): ACLE __yield() intrinsic.
__yield();
#endif
#else
// TODO: your platform here
// Fallback: no pause instruction known, so yield the OS thread instead.
AuThreading::ContextYield();
#endif
}
/**
 * Spin-waits on @p callback, tuned for SMT/shared-core machines.
 *
 * Evaluates @p callback immediately, then spins (pausing between polls) for a
 * budget derived from @p spin. When gSpinAdaptiveThreshold is set, the spin
 * budget adapts to contention: threads over the threshold get a reduced
 * (spin / 3) budget, and gSpinAdaptiveCurrentCount tracks how many threads
 * are spinning at once. When gHasThreadLocalTimeout is set, an extra
 * per-thread phase of (1 << tlsSpinCountLocal) polls runs after the primary
 * loop.
 *
 * @param spin     primary spin budget (approximate poll count)
 * @param callback zero-argument predicate; spinning stops once it returns true
 * @return result of the final callback() evaluation (true if the condition
 *         was observed within the spin window)
 *
 * BUGFIX: the original decremented gSpinAdaptiveCurrentCount twice on the
 * under-threshold fall-through path (once inside the branch and once after
 * the if/else), underflowing the unsigned counter and eventually corrupting
 * the adaptive-threshold comparison. Exactly one AuAtomicSub now balances
 * the AuAtomicAdd on every path.
 */
template <typename T>
bool auline YieldToSharedCore(long spin, T callback)
{
    // Fast path: the condition may already hold.
    if (callback())
    {
        return true;
    }

    if (gSpinAdaptiveThreshold)
    {
        // Register this thread as an active spinner. Every path out of this
        // scope must release exactly one count via AuAtomicSub.
        auto uNow = AuAtomicAdd(&gSpinAdaptiveCurrentCount, 1u);

        if (uNow <= gSpinAdaptiveThreshold)
        {
            // Under the contention threshold: full spin budget.
            int loops = spin;
            while (loops > 0)
            {
                if (callback())
                {
                    AuAtomicSub(&gSpinAdaptiveCurrentCount, 1u);
                    return true;
                }
                // Four pauses per poll to lengthen the backoff window.
                SMPPause();
                SMPPause();
                SMPPause();
                SMPPause();
                loops -= 4;
            }

            if (gHasThreadLocalTimeout)
            {
                // Extra per-thread spin phase, sized by tlsSpinCountLocal.
                int loops = (1 << tlsSpinCountLocal);
                while (loops > 0)
                {
                    if (callback())
                    {
                        AuAtomicSub(&gSpinAdaptiveCurrentCount, 1u);
                        return true;
                    }
                    SMPPause();
                    loops--;
                }
            }
            // Fall through: the single shared decrement below balances the
            // add (the original code decremented a second time here).
        }
        else
        {
            // Over the threshold: too many spinners, use a reduced budget.
            int loops = spin / 3;
            while (loops > 0)
            {
                if (callback())
                {
                    AuAtomicSub(&gSpinAdaptiveCurrentCount, 1u);
                    return true;
                }
                SMPPause();
                loops--;
            }
        }

        // Sole fall-through decrement for both adaptive branches.
        AuAtomicSub(&gSpinAdaptiveCurrentCount, 1u);
    }
    else
    {
        // Non-adaptive path: plain spin with the full budget.
        int loops = spin;
        while (loops > 0)
        {
            if (callback())
            {
                return true;
            }
            SMPPause();
            SMPPause();
            SMPPause();
            SMPPause();
            loops -= 4;
        }

        if (gHasThreadLocalTimeout)
        {
            auto uCount = tlsSpinCountLocal;
            int loops = (1 << uCount);
            while (loops > 0)
            {
                if (callback())
                {
                    return true;
                }
                SMPPause();
                loops--;
            }
        }
    }

    // Final poll so the very last state change is not missed.
    return callback();
}
/**
 * Runs @p callback once, or inside a spin-wait when the platform is flagged
 * as SMP-processor-optimized in the runtime configuration.
 *
 * @param callback zero-argument predicate to (re)try
 * @return true when callback() succeeded (immediately or within the spin
 *         window provided by YieldToSharedCore)
 */
template <typename T>
bool auline DoTryIf(T callback)
{
    const auto &threadCfg = gRuntimeConfig.threadingConfig;

    // Non-SMP-optimized platforms get a single attempt, no spinning.
    if (!threadCfg.bPlatformIsSMPProcessorOptimized)
    {
        return callback();
    }

    return YieldToSharedCore(threadCfg.uSpinLoopPowerA, callback);
}
}