2021-11-05 17:34:23 +00:00
/***
Copyright ( C ) 2021 J Reece Wilson ( a / k / a " Reece " ) . All rights reserved .
File : ThreadPool . cpp
Date : 2021 - 10 - 30
Author : Reece
* * */
# include <Source/RuntimeInternal.hpp>
# include "Async.hpp"
# include "ThreadPool.hpp"
2022-05-17 01:43:26 +00:00
# include "AsyncApp.hpp"
2021-11-05 17:34:23 +00:00
# include "WorkItem.hpp"
# include "Schedular.hpp"
2022-03-10 15:35:01 +00:00
# include "ThreadWorkerQueueShim.hpp"
2023-05-25 00:55:55 +00:00
# include <Source/IO/Loop/LSAsync.hpp>
2021-11-05 17:34:23 +00:00
namespace Aurora : : Async
{
//STATIC_TLS(WorkerId_t, tlsWorkerId);

// Weak handle to the pool whose worker loop is currently executing on this
// thread; set while pumping work items (see PollInternal / Run).
static thread_local AuWPtr<ThreadPool> gCurrentPool;

// Tunable threshold constant; not referenced in this translation-unit view —
// TODO(review): confirm it is still used before removing.
static const auto kMagicResortThreshold = 15;

// Depth of nested work-item dispatches on the calling thread
// (incremented around RunAsync in PollInternal).
static thread_local int tlsCallStack;
2022-05-17 01:43:26 +00:00
// Resolves a public IThreadPool handle to the concrete ThreadPool.
// The global async-app singleton is special-cased: it is not shared_ptr
// owned, so it is wrapped in a non-owning (unsafe RAII) shared handle.
inline auto GetWorkerInternal(const AuSPtr<IThreadPool> &pool)
{
    if (pool.get() == AuStaticCast<IAsyncApp>(gAsyncApp))
    {
        return AuUnsafeRaiiToShared(AuStaticCast<ThreadPool>(gAsyncApp));
    }

    return AuStaticPointerCast<ThreadPool>(pool);
}
2021-11-05 17:34:23 +00:00
// Returns the pool-qualified worker id of the calling thread, or a default
// constructed WorkerPId_t when the caller is not a pool worker.
AUKN_SYM WorkerPId_t GetCurrentWorkerPId()
{
    auto lkPool = gCurrentPool.lock();
    if (!lkPool)
    {
        return {};
    }

    // FIX: the old code also locked cpy.pool into an unused local (lkPool2),
    // paying an atomic ref-count round-trip for nothing.
    auto cpy = *lkPool->tlsWorkerId;
    return WorkerPId_t(lkPool, cpy);
}
//
2023-05-24 07:19:47 +00:00
// Constructs an idle pool. The constructor argument triple configures
// shutdownEvent_ (see the member's event-type factory for the exact
// semantics of the three flags — TODO(review): confirm against header).
ThreadPool::ThreadPool() : shutdownEvent_(false, false, true)
{
}
// internal pool interface
bool ThreadPool : : WaitFor ( WorkerId_t unlocker , const AuSPtr < Threading : : IWaitable > & primitive , AuUInt32 timeoutMs )
2023-01-23 21:18:58 +00:00
{
return WaitFor ( WorkerPId_t { AuAsync : : GetCurrentWorkerPId ( ) . pool , unlocker } , primitive , timeoutMs ) ;
}
bool ThreadPool : : WaitFor ( WorkerPId_t unlocker , const AuSPtr < Threading : : IWaitable > & primitive , AuUInt32 timeoutMs )
2021-11-05 17:34:23 +00:00
{
auto curThread = GetThreadState ( ) ;
2022-05-10 13:51:22 +00:00
if ( ! curThread )
{
return Threading : : WaitFor ( primitive . get ( ) , timeoutMs ) ;
}
2021-11-05 17:34:23 +00:00
bool workerIdMatches = ( unlocker . second = = curThread - > id . second ) | | ( ( unlocker . second = = Async : : kThreadIdAny ) & & ( GetThreadWorkersCount ( unlocker . first ) = = 1 ) ) ;
2023-01-23 21:18:58 +00:00
if ( ( unlocker . first = = curThread - > id . first ) & &
( unlocker . pool . get ( ) = = this ) & & // work group matches
2021-11-05 17:34:23 +00:00
( workerIdMatches ) ) // well, crap
{
bool queryAsync = false ;
while ( ! ( queryAsync ? primitive - > TryLock ( ) : Threading : : WaitFor ( primitive . get ( ) , 2 ) ) )
{
queryAsync = CtxYield ( ) ;
2023-02-08 18:23:37 +00:00
if ( ! queryAsync & & this - > shuttingdown_ )
{
return false ;
}
2021-11-05 17:34:23 +00:00
}
return true ;
}
else
{
2023-01-30 14:32:26 +00:00
AuSPtr < ThreadState > pHandle ;
{
AU_LOCK_GUARD ( AuStaticCast < ThreadPool > ( unlocker . pool ) - > rwlock_ - > AsReadable ( ) ) ;
2023-08-18 22:30:38 +00:00
if ( ( pHandle = AuStaticCast < ThreadPool > ( unlocker . pool ) - > GetThreadHandle ( unlocker ) ) )
2023-01-30 14:32:26 +00:00
{
2023-02-16 00:45:10 +00:00
AU_LOCK_GUARD ( pHandle - > externalFencesLock ) ;
2023-01-30 14:32:26 +00:00
if ( pHandle - > exitingflag2 )
{
return primitive - > TryLock ( ) ;
}
else
{
pHandle - > externalFences . push_back ( primitive . get ( ) ) ;
}
}
2023-02-17 04:44:17 +00:00
else if ( unlocker . pool . get ( ) = = this )
{
2023-06-07 19:35:31 +00:00
return primitive - > LockMS ( timeoutMs ) ;
2023-02-17 04:44:17 +00:00
}
2023-01-30 14:32:26 +00:00
}
bool bRet = Threading : : WaitFor ( primitive . get ( ) , timeoutMs ) ;
if ( pHandle )
{
AU_LOCK_GUARD ( pHandle - > externalFencesLock ) ;
AuTryRemove ( pHandle - > externalFences , primitive . get ( ) ) ;
}
return bRet ;
2021-11-05 17:34:23 +00:00
}
}
void ThreadPool : : Run ( WorkerId_t target , AuSPtr < IAsyncRunnable > runnable )
2023-08-09 02:21:14 +00:00
{
return this - > Run ( target , runnable , true ) ;
}
void ThreadPool : : Run ( WorkerId_t target , AuSPtr < IAsyncRunnable > runnable , bool bIncrement )
2021-11-05 17:34:23 +00:00
{
auto state = GetGroup ( target . first ) ;
SysAssert ( static_cast < bool > ( state ) , " couldn't dispatch a task to an offline group " ) ;
2023-05-25 00:55:55 +00:00
auto pWorker = state - > GetThreadByIndex ( target . second ) ;
if ( ! pWorker )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
runnable - > CancelAsync ( ) ;
return ;
}
2021-11-05 17:34:23 +00:00
2023-08-10 02:34:44 +00:00
AU_DEBUG_MEMCRUNCH ;
2023-08-09 02:21:14 +00:00
if ( bIncrement )
{
AuAtomicAdd ( & this - > uAtomicCounter , 1u ) ;
}
2021-11-05 17:34:23 +00:00
2023-07-10 23:54:54 +00:00
state - > workQueue . AddWorkEntry ( AuMakePair ( target . second , runnable ) ) ;
2023-06-07 19:51:23 +00:00
if ( target . second = = Async : : kThreadIdAny )
{
state - > BroadCast ( ) ;
}
else
2023-05-25 00:55:55 +00:00
{
AU_LOCK_GUARD ( pWorker - > cvWorkMutex ) ;
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
pWorker - > cvVariable - > Signal ( ) ;
pWorker - > eventLs - > Set ( ) ;
2021-11-05 17:34:23 +00:00
}
}
// Identity accessor for the public interface.
IThreadPool *ThreadPool::ToThreadPool()
{
    return this;
}
// ithreadpool
// Returns the number of workers registered in `group`, under the read lock.
size_t ThreadPool::GetThreadWorkersCount(ThreadGroup_t group)
{
    AU_LOCK_GUARD(this->rwlock_->AsReadable());

    return GetGroup(group)->workers.size();
}
void ThreadPool : : SetRunningMode ( bool eventRunning )
{
this - > runnersRunning_ = eventRunning ;
}
bool ThreadPool : : Spawn ( WorkerId_t workerId )
{
return Spawn ( workerId , false ) ;
}
bool ThreadPool : : Create ( WorkerId_t workerId )
{
return Spawn ( workerId , true ) ;
}
bool ThreadPool : : InRunnerMode ( )
{
return this - > runnersRunning_ ;
}
bool ThreadPool : : Poll ( )
{
2023-03-05 12:55:07 +00:00
AuUInt32 uCount { } ;
return InternalRunOne ( false , uCount ) ;
2021-11-05 17:34:23 +00:00
}
bool ThreadPool : : RunOnce ( )
{
2023-03-05 12:55:07 +00:00
AuUInt32 uCount { } ;
return InternalRunOne ( true , uCount ) ;
2021-11-05 17:34:23 +00:00
}
bool ThreadPool : : Run ( )
{
bool ranOnce { } ;
2023-01-23 21:18:58 +00:00
2023-05-24 07:19:47 +00:00
auto pJobRunner = GetThreadStateNoWarn ( ) ;
2021-11-05 17:34:23 +00:00
2023-05-24 07:19:47 +00:00
if ( ! pJobRunner )
2022-05-10 13:51:22 +00:00
{
2023-05-24 07:19:47 +00:00
this - > shutdownEvent_ - > LockMS ( 0 ) ;
return true ;
2022-05-10 13:51:22 +00:00
}
2023-05-24 07:19:47 +00:00
gCurrentPool = AuWeakFromThis ( ) ;
auto auThread = AuThreads : : GetThread ( ) ;
2023-02-08 18:23:37 +00:00
while ( ( ! auThread - > Exiting ( ) ) & &
( ! this - > shutdown ) & &
2023-05-24 07:19:47 +00:00
( ! pJobRunner - > bBreakEarly ) )
2021-11-05 17:34:23 +00:00
{
2023-03-05 12:55:07 +00:00
AuUInt32 uCount { } ;
2021-11-05 17:34:23 +00:00
// Do work (blocking)
2023-03-05 12:55:07 +00:00
if ( ! InternalRunOne ( true , uCount ) )
2023-02-08 18:23:37 +00:00
{
if ( this - > shutdown )
{
return ranOnce ;
}
}
2021-11-05 17:34:23 +00:00
ranOnce = true ;
}
return ranOnce ;
}
2023-03-05 12:55:07 +00:00
bool ThreadPool : : InternalRunOne ( bool block , AuUInt32 & uCount )
2021-11-05 17:34:23 +00:00
{
2023-05-24 07:19:47 +00:00
auto state = GetThreadStateNoWarn ( ) ;
2022-05-10 13:51:22 +00:00
if ( ! state )
{
SysPushErrorUninitialized ( " Not an async thread " ) ;
2023-05-24 07:19:47 +00:00
return false ;
2022-05-10 13:51:22 +00:00
}
2021-11-05 17:34:23 +00:00
bool success { } ;
2022-03-10 15:35:01 +00:00
auto runMode = GetCurrentThreadRunMode ( ) ;
2023-03-05 12:55:07 +00:00
2023-02-08 18:23:37 +00:00
EarlyExitTick ( ) ;
2022-03-10 15:35:01 +00:00
2022-06-22 13:42:17 +00:00
//do
2021-11-05 17:34:23 +00:00
{
2022-03-10 15:35:01 +00:00
auto asyncLoop = state - > asyncLoop ;
asyncLoop - > OnFrame ( ) ;
if ( asyncLoop - > GetSourceCount ( ) > 1 )
2021-11-05 17:34:23 +00:00
{
2022-03-10 15:35:01 +00:00
bool bShouldTrySleepForKernel { } ;
if ( runMode = = ERunMode : : eLowLatencyFreqKernel )
{
if ( state - > rateLimiter . CheckExchangePass ( ) )
{
2022-04-10 15:40:49 +00:00
# if defined(AURORA_PLATFORM_WIN32)
bShouldTrySleepForKernel = asyncLoop - > PumpNonblocking ( ) ;
# else
bShouldTrySleepForKernel = asyncLoop - > IsSignaledPeek ( ) ;
# endif
2022-03-10 15:35:01 +00:00
}
else
{
2023-03-05 12:55:07 +00:00
if ( ! PollInternal ( false , uCount ) )
2022-03-10 15:35:01 +00:00
{
AuThreading : : ContextYield ( ) ;
}
else
{
success = true ;
}
}
}
else if ( runMode = = ERunMode : : eLowLatencyYield )
{
AuThreading : : ContextYield ( ) ;
block = false ;
2022-04-10 15:40:49 +00:00
# if defined(AURORA_PLATFORM_WIN32)
bShouldTrySleepForKernel = asyncLoop - > PumpNonblocking ( ) ;
# else
bShouldTrySleepForKernel = asyncLoop - > IsSignaledPeek ( ) ;
# endif
2022-03-10 15:35:01 +00:00
}
else if ( runMode = = ERunMode : : eEfficient )
{
bShouldTrySleepForKernel = block ;
if ( ! block )
{
2022-04-10 15:40:49 +00:00
bShouldTrySleepForKernel = asyncLoop - > IsSignaledPeek ( ) ;
2022-03-10 15:35:01 +00:00
}
}
2023-06-26 07:11:45 +00:00
if ( bShouldTrySleepForKernel )
{
2022-06-22 13:42:17 +00:00
// epoll and such like can be checked without read success. kevent works on availablity, not scheduling read like iosubmit
2022-04-10 15:40:49 +00:00
// allow windows to atomically pump instead of wasting time buffering the primitives state
2023-06-26 07:11:45 +00:00
if ( ( AuBuild : : kIsNtDerived & & runMode = = ERunMode : : eEfficient ) | |
2022-06-22 13:42:17 +00:00
( ! AuBuild : : kIsNtDerived ) )
2023-06-26 07:11:45 +00:00
{
asyncLoop - > WaitAny ( 0 ) ;
}
2022-03-10 15:35:01 +00:00
}
2023-06-26 07:11:45 +00:00
success = PollInternal ( false , uCount ) ;
2021-11-05 17:34:23 +00:00
}
else
{
2023-03-05 12:55:07 +00:00
success = PollInternal ( block , uCount ) ;
2021-11-05 17:34:23 +00:00
}
2022-06-22 13:42:17 +00:00
} //while (success);
2021-11-05 17:34:23 +00:00
2023-02-08 18:23:37 +00:00
EarlyExitTick ( ) ;
2023-02-09 00:42:56 +00:00
2021-11-05 17:34:23 +00:00
return success ;
}
2023-05-25 00:55:55 +00:00
void GroupWorkQueue : : AddWorkEntry ( WorkEntry_t entry )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
AU_LOCK_GUARD ( this - > mutex ) ;
2022-05-10 13:51:22 +00:00
2023-05-25 00:55:55 +00:00
auto prio = ( int ) entry . second - > GetPrio ( ) ;
SysAssert ( prio < AuAsync : : kEWorkPrioCount , " Invalid PRIO " ) ;
this - > sortedWork [ prio ] . push_back ( entry ) ;
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
bool GroupWorkQueue : : IsEmpty ( )
{
AU_LOCK_GUARD ( this - > mutex ) ;
for ( AU_ITERATE_N ( i , AuAsync : : kEWorkPrioCount ) )
{
if ( this - > sortedWork [ i ] . size ( ) )
{
2023-05-30 11:46:38 +00:00
return false ;
2023-05-25 00:55:55 +00:00
}
}
2021-11-05 17:34:23 +00:00
2023-05-30 11:46:38 +00:00
return true ;
2023-05-25 00:55:55 +00:00
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
bool GroupWorkQueue : : IsEmpty ( AuWorkerId_t id )
{
AU_LOCK_GUARD ( this - > mutex ) ;
2023-05-30 11:46:38 +00:00
2023-05-25 00:55:55 +00:00
for ( AU_ITERATE_N ( i , AuAsync : : kEWorkPrioCount ) )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
for ( const auto & [ srcId , pA ] : this - > sortedWork [ i ] )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
if ( id = = srcId )
2021-11-05 17:34:23 +00:00
{
2023-05-30 11:46:38 +00:00
return false ;
2023-05-25 00:55:55 +00:00
}
}
}
2021-11-05 17:34:23 +00:00
2023-05-30 11:46:38 +00:00
return true ;
2023-05-25 00:55:55 +00:00
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
void GroupWorkQueue : : Dequeue ( AuList < WorkEntry_t > & queue , int maxPopCount , AuAsync : : ThreadId_t id )
{
AU_LOCK_GUARD ( this - > mutex ) ;
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
for ( AU_ITERATE_N ( i , AuAsync : : kEWorkPrioCount ) )
{
auto & group = this - > sortedWork [ ( int ) AuAsync : : kEWorkPrioMaxLegal - i ] ;
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
for ( auto itr = group . begin ( ) ; ( ( itr ! = group . end ( ) ) & & ( queue . size ( ) < maxPopCount ) ) ; )
{
if ( itr - > first = = Async : : kThreadIdAny )
{
queue . push_back ( * itr ) ;
itr = group . erase ( itr ) ;
continue ;
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
if ( ( itr - > first ! = Async : : kThreadIdAny ) & &
( itr - > first = = id ) )
{
queue . push_back ( * itr ) ;
itr = group . erase ( itr ) ;
continue ;
2021-11-05 17:34:23 +00:00
}
2023-05-25 00:55:55 +00:00
itr + + ;
2021-11-05 17:34:23 +00:00
}
2023-05-25 00:55:55 +00:00
if ( queue . size ( ) )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
break ;
}
}
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
// TODO: rewrite queues
bool ThreadPool : : PollInternal ( bool block , AuUInt32 & uCount )
{
2023-05-25 01:53:10 +00:00
auto state = GetThreadStateNoWarn ( ) ;
2023-05-25 00:55:55 +00:00
if ( ! state )
{
SysPushErrorUninitialized ( " Not an async thread " ) ;
2023-05-25 01:53:10 +00:00
return false ;
2023-05-25 00:55:55 +00:00
}
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
auto group = state - > parent . lock ( ) ;
//state->pendingWorkItems.clear();
{
AU_LOCK_GUARD ( state - > cvWorkMutex ) ;
do
{
group - > workQueue . Dequeue ( state - > pendingWorkItems , state - > multipopCount , state - > id . second ) ;
2021-11-05 17:34:23 +00:00
// Consider blocking for more work
if ( ! block )
{
break ;
}
2022-01-19 02:49:44 +00:00
// pre-wakeup thread terminating check
2023-02-08 18:23:37 +00:00
if ( state - > threadObject - > Exiting ( ) )
2022-01-19 02:49:44 +00:00
{
break ;
}
2023-02-08 18:23:37 +00:00
2021-11-05 17:34:23 +00:00
// Block if no work items are present
if ( state - > pendingWorkItems . empty ( ) )
{
2023-06-07 19:35:31 +00:00
if ( this - > shuttingdown_ & 2 )
2023-02-08 18:23:37 +00:00
{
break ;
}
2023-05-25 00:55:55 +00:00
state - > cvVariable - > WaitForSignal ( ) ;
2023-02-08 18:23:37 +00:00
2023-06-07 19:35:31 +00:00
if ( this - > shuttingdown_ & 2 )
2023-02-08 18:23:37 +00:00
{
break ;
}
2021-11-05 17:34:23 +00:00
}
// Post-wakeup thread terminating check
2023-02-08 18:23:37 +00:00
if ( state - > threadObject - > Exiting ( ) )
2021-11-05 17:34:23 +00:00
{
break ;
}
2022-11-17 20:56:41 +00:00
if ( state - > pendingWorkItems . empty ( ) & & (
( this - > GetThreadState ( ) - > asyncLoop - > GetSourceCount ( ) > 1 ) | |
this - > GetThreadState ( ) - > asyncLoop - > CommitPending ( ) ) ) //(this->ToKernelWorkQueue()->IsSignaledPeek()))
{
return false ;
}
2022-11-28 16:01:08 +00:00
} while ( state - > pendingWorkItems . empty ( ) & & block ) ;
2021-11-05 17:34:23 +00:00
2023-05-25 00:55:55 +00:00
if ( group - > workQueue . IsEmpty ( state - > id ) )
2021-11-05 17:34:23 +00:00
{
2023-05-25 00:55:55 +00:00
state - > eventLs - > Reset ( ) ;
2021-11-05 17:34:23 +00:00
}
}
if ( state - > pendingWorkItems . empty ( ) )
{
2023-05-25 00:55:55 +00:00
if ( InRunnerMode ( ) )
{
2023-08-09 02:21:14 +00:00
if ( ( this - > uAtomicCounter = = 0 ) & &
2023-08-13 08:30:17 +00:00
this - > IsDepleted ( ) )
2023-05-25 00:55:55 +00:00
{
Shutdown ( ) ;
}
}
2021-11-05 17:34:23 +00:00
return false ;
}
int runningTasks { } ;
2022-01-19 17:08:13 +00:00
auto oldTlsHandle = AuExchange ( gCurrentPool , AuSharedFromThis ( ) ) ;
2021-11-05 17:34:23 +00:00
bool lowPrioCont { } ;
bool lowPrioContCached { } ;
2022-05-19 23:56:58 +00:00
state - > cookie + + ;
int start = state - > cookie ;
2022-06-11 23:01:27 +00:00
// Account for
// while (AuAsync.GetCurrentPool()->runForever());
// in the first task (or deeper)
if ( InRunnerMode ( ) & & tlsCallStack ) // are we one call deep?
{
auto queue = ToKernelWorkQueue ( ) ;
2023-08-09 02:21:14 +00:00
if ( ( this - > uAtomicCounter = = tlsCallStack ) & &
2023-08-13 08:30:17 +00:00
this - > IsDepleted ( ) )
2022-06-11 23:01:27 +00:00
{
return false ;
}
}
//
2021-11-05 17:34:23 +00:00
for ( auto itr = state - > pendingWorkItems . begin ( ) ; itr ! = state - > pendingWorkItems . end ( ) ; )
{
2023-02-08 18:23:37 +00:00
if ( state - > threadObject - > Exiting ( ) | | this - > shutdown )
2021-11-05 17:34:23 +00:00
{
break ;
}
// Set the last frame time for a watchdog later down the line
state - > lastFrameTime = Time : : CurrentClockMS ( ) ;
// Dispatch
2022-05-19 03:07:10 +00:00
auto oops = itr - > second ;
2021-11-05 17:34:23 +00:00
// Remove from our local job queue
itr = state - > pendingWorkItems . erase ( itr ) ;
2022-06-11 23:01:27 +00:00
tlsCallStack + + ;
2022-11-28 16:01:08 +00:00
//SysBenchmark(fmt::format("RunAsync: {}", block));
2022-05-19 03:07:10 +00:00
// Dispatch
oops - > RunAsync ( ) ;
2023-03-05 12:55:07 +00:00
uCount + + ;
2022-05-19 03:07:10 +00:00
2021-11-05 17:34:23 +00:00
// Atomically decrement global task counter
2023-08-09 02:21:14 +00:00
runningTasks = AuAtomicSub ( & this - > uAtomicCounter , 1u ) ;
2022-05-19 23:56:58 +00:00
2022-06-11 23:01:27 +00:00
tlsCallStack - - ;
2022-05-19 23:56:58 +00:00
if ( start ! = state - > cookie )
{
start = state - > cookie ;
itr = state - > pendingWorkItems . begin ( ) ;
}
2021-11-05 17:34:23 +00:00
}
gCurrentPool = oldTlsHandle ;
// Return popped work back to the groups work pool when our -pump loops were preempted
if ( state - > pendingWorkItems . size ( ) )
{
2023-05-25 00:55:55 +00:00
AU_LOCK_GUARD ( state - > cvWorkMutex ) ;
for ( const auto & item : state - > pendingWorkItems )
{
group - > workQueue . AddWorkEntry ( item ) ;
}
state - > pendingWorkItems . clear ( ) ;
state - > cvVariable - > Broadcast ( ) ;
state - > eventLs - > Set ( ) ;
2021-11-05 17:34:23 +00:00
state - > pendingWorkItems . clear ( ) ;
}
2022-06-11 23:01:27 +00:00
// Account for
// while (AuAsync.GetCurrentPool()->runForever());
// in the top most task
2021-11-05 17:34:23 +00:00
if ( InRunnerMode ( ) )
{
2022-06-11 23:01:27 +00:00
auto queue = ToKernelWorkQueue ( ) ;
if ( ( runningTasks = = 0 ) & &
2023-08-09 09:25:22 +00:00
( this - > uAtomicCounter = = 0 ) & &
2023-08-13 08:30:17 +00:00
this - > IsDepleted ( ) )
2021-11-05 17:34:23 +00:00
{
Shutdown ( ) ;
}
}
return true ;
}
2023-05-24 07:19:47 +00:00
// While much of this subsystem needs good rewrite, under no circumstance should the shutdown process be "simpified" or "cleaned up"
// This is our expected behaviour. Any changes will likely introduce hard to catch bugs across various softwares and exit conditions.
2021-11-05 17:34:23 +00:00
void ThreadPool : : Shutdown ( )
{
2023-01-30 14:32:26 +00:00
auto trySelfPid = AuAsync : : GetCurrentWorkerPId ( ) ;
// Update shutting down flag
2023-02-08 18:23:37 +00:00
// Specify the root-level shutdown flag for 'ok, u can work, but you're shutting down soon [microseconds, probably]'
2021-11-05 17:34:23 +00:00
{
2023-01-30 14:32:26 +00:00
if ( AuAtomicTestAndSet ( & this - > shuttingdown_ , 0 ) ! = 0 )
2021-11-05 17:34:23 +00:00
{
return ;
}
}
2023-02-04 19:43:01 +00:00
2023-02-08 18:23:37 +00:00
auto pLocalRunner = this - > GetThreadStateNoWarn ( ) ;
AuList < WorkerId_t > toBarrier ;
2021-11-05 17:34:23 +00:00
2023-08-10 00:20:34 +00:00
{
2021-11-05 17:34:23 +00:00
{
2023-01-30 14:32:26 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ;
2023-08-10 00:31:10 +00:00
for ( auto pGroup : this - > threadGroups_ )
2021-11-05 17:34:23 +00:00
{
2023-08-10 00:31:10 +00:00
if ( ! pGroup )
{
continue ;
}
for ( auto & [ id , worker ] : pGroup - > workers )
2023-01-30 14:32:26 +00:00
{
if ( trySelfPid = = worker - > id )
{
continue ;
}
toBarrier . push_back ( worker - > id ) ;
}
2021-11-05 17:34:23 +00:00
}
}
2023-01-30 14:32:26 +00:00
2023-06-07 19:35:31 +00:00
2023-01-30 14:32:26 +00:00
for ( const auto & id : toBarrier )
{
2023-02-08 18:23:37 +00:00
if ( trySelfPid = = id )
{
continue ;
}
this - > Barrier ( id , 0 , false , false /* no reject*/ ) ; // absolute safest point in shutdown; sync to already submitted work
2023-01-30 14:32:26 +00:00
}
2021-11-05 17:34:23 +00:00
}
2023-02-08 18:23:37 +00:00
// Time for fuckiness
// Specify the root-level shutdown flag for 'ok, u can work, but you're shutting down after sync barrier'
{
AuAtomicTestAndSet ( & this - > shuttingdown_ , 1 ) ;
}
2021-11-05 17:34:23 +00:00
// Finally set the shutdown flag on all of our thread contexts
// then release them from the runners/workers list
// then release all group contexts
AuList < AuThreads : : ThreadShared_t > threads ;
2023-06-07 19:35:31 +00:00
AuList < AuSPtr < ThreadState > > states ;
2021-11-05 17:34:23 +00:00
{
2023-06-07 19:35:31 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ;
2021-11-05 17:34:23 +00:00
2023-08-10 00:31:10 +00:00
for ( auto pGroup : this - > threadGroups_ )
2021-11-05 17:34:23 +00:00
{
2023-08-10 00:31:10 +00:00
if ( ! pGroup )
{
continue ;
}
for ( auto & [ id , pState ] : pGroup - > workers )
2021-11-05 17:34:23 +00:00
{
2023-06-07 19:35:31 +00:00
// main loop:
if ( pState & & pState - > cvWorkMutex & & pState - > cvVariable )
2022-01-19 02:49:44 +00:00
{
2023-06-07 19:35:31 +00:00
states . push_back ( pState ) ;
pState - > shuttingdown = true ;
2022-01-19 02:49:44 +00:00
}
2022-01-19 11:47:29 +00:00
else
{
2023-06-07 19:35:31 +00:00
pState - > shuttingdown = true ;
2022-01-19 11:47:29 +00:00
}
2021-11-05 17:34:23 +00:00
2023-06-07 19:35:31 +00:00
// thread object:
2023-08-10 00:31:10 +00:00
if ( ! pGroup - > IsSysThread ( ) ) // bug?
2021-11-05 17:34:23 +00:00
{
2023-06-07 19:35:31 +00:00
pState - > threadObject - > SendExitSignal ( ) ;
threads . push_back ( pState - > threadObject ) ;
2021-11-05 17:34:23 +00:00
}
2023-06-07 19:35:31 +00:00
// unrefreeze signals:
auto & event = pState - > running ;
2021-11-05 17:34:23 +00:00
if ( event )
{
event - > Set ( ) ;
}
}
}
}
2023-06-07 19:35:31 +00:00
{
for ( const auto & pState : states )
{
AU_LOCK_GUARD ( pState - > cvWorkMutex ) ;
pState - > cvVariable - > Broadcast ( ) ;
pState - > eventLs - > Set ( ) ;
}
}
2023-02-08 18:23:37 +00:00
// Final sync to exit
2023-06-07 19:35:31 +00:00
2023-02-08 18:23:37 +00:00
{
for ( const auto & id : toBarrier )
{
if ( trySelfPid = = id )
{
continue ;
}
auto handle = this - > GetThreadHandle ( id ) ;
if ( handle )
{
handle - > rejecting = false ;
}
2023-06-07 19:35:31 +00:00
handle - > isDeadEvent - > LockMS ( 250 ) ;
2023-02-08 18:23:37 +00:00
}
}
2021-11-05 17:34:23 +00:00
// Sync to shutdown threads to prevent a race condition whereby the async subsystem shuts down before the threads
for ( const auto & thread : threads )
{
thread - > Exit ( ) ;
}
2023-02-08 18:23:37 +00:00
// Is dead flag
this - > shutdown = true ;
2023-05-24 07:19:47 +00:00
this - > shutdownEvent_ - > Set ( ) ;
2023-02-08 18:23:37 +00:00
if ( pLocalRunner )
{
pLocalRunner - > bIsKiller = true ;
}
2023-08-17 10:46:25 +00:00
for ( const auto & wOther : this - > listWeakDepsParents_ )
{
if ( auto pThat = AuTryLockMemoryType ( wOther ) )
{
if ( pThat - > InRunnerMode ( ) )
{
continue ;
}
if ( ! pThat - > IsSelfDepleted ( ) )
{
continue ;
}
if ( pThat - > uAtomicCounter )
{
continue ;
}
pThat - > Shutdown ( ) ;
}
}
2021-11-05 17:34:23 +00:00
}
2023-02-04 19:43:01 +00:00
2021-11-05 17:34:23 +00:00
bool ThreadPool : : Exiting ( )
{
2023-01-30 13:26:17 +00:00
return this - > shuttingdown_ | |
GetThreadState ( ) - > exiting ;
2021-11-05 17:34:23 +00:00
}
2023-03-05 12:55:07 +00:00
// Non-blocking pump that reports how many items ran. Under bStrict, a pump
// that did something but dispatched zero counted items still reports 1.
AuUInt32 ThreadPool::PollAndCount(bool bStrict)
{
    AuUInt32 uDispatched {};
    auto bDidWork = this->InternalRunOne(false, uDispatched);

    if (uDispatched)
    {
        return uDispatched;
    }

    return bStrict ? bDidWork : 0;
}
// Pumps until no further work is pending and reports how many items ran.
// FIX: the previous `return uCount ? uCount : false` always returned 0,
// since the loop only exits once uCount is 0, and the ranAtLeastOne flag it
// computed was never used. We now accumulate a total across iterations and
// fall back to the did-anything flag, preserving the obvious intent.
AuUInt32 ThreadPool::RunAllPending()
{
    AuUInt32 uTotal {};
    AuUInt32 uCount {};
    bool bRanAtLeastOne {};

    do
    {
        uCount = 0;
        bRanAtLeastOne |= this->InternalRunOne(false, uCount);
        uTotal += uCount;
    }
    while (uCount);

    return uTotal ? uTotal : static_cast<AuUInt32>(bRanAtLeastOne);
}
2023-01-30 13:26:17 +00:00
// Creates a work item bound to `worker` on this pool.
// A null task is passed through as a null result rather than asserting.
AuSPtr<IWorkItem> ThreadPool::NewWorkItem(const WorkerId_t &worker,
                                          const AuSPtr<IWorkItemHandler> &task)
{
    // Error pass-through
    if (!task)
    {
        return {};
    }

    return AuMakeShared<WorkItem>(this, WorkerPId_t { this->SharedFromThis(), worker }, task);
}
2023-08-10 02:34:44 +00:00
// Creates a function-backed work item bound to `worker` on this pool.
// Unlike NewWorkItem, an empty callback is a programmer error (assert).
AuSPtr<IWorkItem> ThreadPool::NewWorkFunction(const WorkerId_t &worker,
                                              AuVoidFunc callback)
{
    SysAssert(callback);

    return AuMakeShared<FuncWorker>(this, WorkerPId_t { this->SharedFromThis(), worker }, AuMove(callback));
}
2021-11-05 17:34:23 +00:00
// Creates a fence: a handler-less work item scheduled against the calling
// worker, usable purely as a synchronization point.
AuSPtr<IWorkItem> ThreadPool::NewFence()
{
    return AuMakeShared<WorkItem>(this, AuAsync::GetCurrentWorkerPId(), AuSPtr<IWorkItemHandler> {});
}
// Maps a worker id to its underlying thread object; null when unknown.
AuThreads::ThreadShared_t ThreadPool::ResolveHandle(WorkerId_t id)
{
    auto pState = GetThreadHandle(id);
    if (!pState)
    {
        return {};
    }

    return pState->threadObject;
}
// Snapshots the pool topology: group id -> list of worker thread ids,
// taken under the read lock.
AuBST<ThreadGroup_t, AuList<ThreadId_t>> ThreadPool::GetThreads()
{
    AU_LOCK_GUARD(rwlock_->AsReadable());

    AuBST<ThreadGroup_t, AuList<ThreadId_t>> ret;

    for (auto pGroup : this->threadGroups_)
    {
        if (!pGroup)
        {
            continue;
        }

        AuList<ThreadId_t> workers;
        AuTryReserve(workers, pGroup->workers.size());

        for (const auto &thread : pGroup->workers)
        {
            workers.push_back(thread.second->id.second);
        }

        ret[pGroup->group] = workers;
    }

    return ret;
}
// Returns the calling thread's worker identity (thread-local state).
WorkerId_t ThreadPool::GetCurrentThread()
{
    return tlsWorkerId;
}
bool ThreadPool : : Sync ( WorkerId_t workerId , AuUInt32 timeoutMs , bool requireSignal )
{
2021-11-07 20:17:08 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ;
2021-11-05 17:34:23 +00:00
auto group = GetGroup ( workerId . first ) ;
auto currentWorkerId = GetCurrentThread ( ) . second ;
if ( workerId . second = = Async : : kThreadIdAny )
{
for ( auto & jobWorker : group - > workers )
{
if ( ! Barrier ( jobWorker . second - > id , timeoutMs , requireSignal & & jobWorker . second - > id . second ! = currentWorkerId , false ) ) // BAD!, should subtract time elapsed, clamp to, i dunno, 5ms min?
{
return false ;
}
}
}
else
{
return Barrier ( workerId , timeoutMs , requireSignal & & workerId . second ! = currentWorkerId , false ) ;
}
return true ;
}
void ThreadPool : : Signal ( WorkerId_t workerId )
{
2021-11-07 20:17:08 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ;
2021-11-05 17:34:23 +00:00
auto group = GetGroup ( workerId . first ) ;
if ( workerId . second = = Async : : kThreadIdAny )
{
for ( auto & jobWorker : group - > workers )
{
jobWorker . second - > running - > Set ( ) ;
}
}
else
{
GetThreadHandle ( workerId ) - > running - > Set ( ) ;
}
}
[*/+/-] MEGA COMMIT. ~2 weeks compressed.
The intention is to quickly improve and add util apis, enhance functionality given current demands, go back to the build pipeline, finish that, publish runtime tests, and then use what we have to go back to to linux support with a more stable api.
[+] AuMakeSharedArray
[+] Technet ArgvQuote
[+] Grug subsystem (UNIX signal thread async safe ipc + telemetry flusher + log flusher.)
[+] auEndianness -> Endian swap utils
[+] AuGet<N>(...)
[*] AUE_DEFINE conversion for
ECompresionType, EAnsiColor, EHashType, EStreamError, EHexDump
[+] ConsoleMessage ByteBuffer serialization
[+] CmdLine subsystem for parsing command line arguments and simple switch/flag checks
[*] Split logger from console subsystem
[+] StartupParameters -> A part of a clean up effort under Process
[*] Refactor SysErrors header + get caller hack
[+] Atomic APIs
[+] popcnt
[+] Ring Buffer sink
[+] Added more standard errors
Catch,
Submission,
LockError,
NoAccess,
ResourceMissing,
ResourceLocked,
MalformedData,
InSandboxContext,
ParseError
[+] Added ErrorCategorySet, ErrorCategoryClear, GetStackTrace
[+] IExitSubscriber, ETriggerLevel
[*] Write bias the high performance RWLockImpl read-lock operation operation
[+] ExitHandlerAdd/ExitHandlerRemove (exit subsystem)
[*] Updated API style
Digests
[+] CpuId::CpuBitCount
[+] GetUserProgramsFolder
[+] GetPackagePath
[*] Split IStreamReader with an inl file
[*] BlobWriter/BlobReader/BlobArbitraryReader can now take shared pointers to bytebuffers. default constructor allocates a new scalable bytebuffer
[+] ICharacterProvider
[+] ICharacterProviderEx
[+] IBufferedCharacterConsumer
[+] ProviderFromSharedString
[+] ProviderFromString
[+] BufferConsumerFromProvider
[*] Parse Subsystem uses character io bufferer
[*] Rewritten NT's high perf semaphore to use userland SRW/ConVars [like mutex, based on generic semaphore]
[+] ByteBuffer::ResetReadPointer
[*] Bug fix bytebuffer base not reset on free and some scaling issues
[+] ProcessMap -> Added kSectionNameStack, kSectionNameFile, kSectionNameHeap for Section
[*] ProcessMap -> Refactor Segment to Section. I was stupid for keeping a type conflict hack API facing
[+] Added 64 *byte* fast RNG seeds
[+] File Advisories/File Lock Awareness
[+] Added extended IAuroraThread from OS identifier caches for debug purposes
[*] Tweaked how memory is reported on Windows. Better consistency of what values mean across functions.
[*] Broke AuroraUtils/Typedefs out into a separate library
[*] Update build script
[+] Put some more effort into adding detail to the readme before rewriting it, plus, added some media
[*] Improved public API documentation
[*] Bug fix `SetConsoleCtrlHandler`
[+] Locale TimeDateToFileNameISO8601
[+] Console config stdOutShortTime
[*] Begin using internal UTF8/16 decoders when platform support isn't available (instead of stl)
[*] Bug fixes in decoders
[*] Major bug fix, AuMax
[+] RateLimiter
[+] Binary file sink
[+] Log directory sink
[*] Data header usability (more operators)
[+] AuRemoveRange
[+] AuRemove
[+] AuTryRemove
[+] AuTryRemoveRange
[+] auCastUtils
[+] Finish NewLSWin32Source
[+] AuTryFindByTupleN, AuTryRemoveByTupleN
[+] Separated AuRead/Write types, now in auTypeUtils
[+] Added GetPosition/SetPosition to FileWriter
[*] Fix stupid AuMin in place of AuMax in SpawnThread.Unix.Cpp
[*] Refactored Arbitrary readers to SeekingReaders (as in, they could be atomic and/or parallelized, and accept an arbitrary position as a work parameter -> not Seekable, as in, you can simply set the position)
[*] Hack back in the sched deinit
[+] File AIO loop source interop
[+] Begin to prototype a LoopQueue object I had in mind for NT, untested btw
[+] Stub code for networking
[+] Compression BaseStream/IngestableStreamBase
[*] Major: read/write locks now support write-entrant read routines.
[*] Compression subsystem now uses the MemoryView concept
[*] Rewrite the base stream compressions, made them less broken
[*] Update hashing api
[*] WriterTryGoForward and ReaderTryGoForward now revert to the previous relative index instead of panicing
[+] Added new AuByteBuffer apis
Trim, Pad, WriteFrom, WriteString, [TODO: ReadString]
[+] Added ByteBufferPushReadState
[+] Added ByteBufferPushWriteState
[*] Move from USC-16 to full UTF-16. Win32 can handle full UTF-16.
[*] ELogLevel is now an Aurora enum
[+] Raised arbitrary limit in header to 255, the max filter buffer
[+] Explicit GZip support
[+] Explicit Zip support
[+] Added [some] compressors
et al
2022-02-17 00:11:40 +00:00
2022-06-11 23:52:46 +00:00
// Resolves a worker id to the loop source that can be used to wake / wait on
// that worker's event loop. Returns a null pointer when the worker is unknown.
// NOTE: this block was reconstructed after interleaved changelog text
// corrupted the original definition; the logic is unchanged.
AuSPtr<AuLoop::ILoopSource> ThreadPool::WorkerToLoopSource(WorkerId_t workerId)
{
    AU_LOCK_GUARD(this->rwlock_->AsReadable());

    auto a = GetThreadHandle(workerId);
    if (!a)
    {
        return {};
    }

    // asyncLoopSourceShared is assigned from eventLs during ::Spawn
    return a->asyncLoopSourceShared;
}
2021-11-05 17:34:23 +00:00
void ThreadPool : : SyncAllSafe ( )
{
2021-11-07 20:17:08 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ;
2021-11-05 17:34:23 +00:00
2023-08-10 00:31:10 +00:00
for ( auto pGroup : this - > threadGroups_ )
2021-11-05 17:34:23 +00:00
{
2023-08-10 00:31:10 +00:00
if ( ! pGroup )
{
continue ;
}
for ( auto & jobWorker : pGroup - > workers )
2021-11-05 17:34:23 +00:00
{
SysAssert ( Barrier ( jobWorker . second - > id , 0 , false , false ) ) ;
}
}
}
2023-01-30 13:26:17 +00:00
void ThreadPool : : AddFeature ( WorkerId_t id ,
AuSPtr < AuThreads : : IThreadFeature > pFeature ,
bool bNonBlock )
2021-11-05 17:34:23 +00:00
{
2023-01-30 13:26:17 +00:00
auto work = AuMakeSharedThrow < BasicWorkStdFunc > ( ( [ = ] ( )
2021-11-05 17:34:23 +00:00
{
2023-01-30 13:26:17 +00:00
GetThreadState ( ) - > features . push_back ( pFeature ) ;
pFeature - > Init ( ) ;
2021-11-05 17:34:23 +00:00
} ) ) ;
2022-06-11 23:01:27 +00:00
2023-08-10 02:34:44 +00:00
auto pWorkItem = this - > NewWorkItem ( id , work ) - > Dispatch ( ) ;
2023-01-30 13:26:17 +00:00
SysAssert ( pWorkItem ) ;
2021-11-05 17:34:23 +00:00
2023-01-30 13:26:17 +00:00
if ( ! bNonBlock )
2021-11-05 17:34:23 +00:00
{
2023-01-30 13:26:17 +00:00
pWorkItem - > BlockUntilComplete ( ) ;
2021-11-05 17:34:23 +00:00
}
}
void ThreadPool : : AssertInThreadGroup ( ThreadGroup_t group )
{
SysAssert ( static_cast < WorkerId_t > ( tlsWorkerId ) . first = = group ) ;
}
void ThreadPool : : AssertWorker ( WorkerId_t id )
{
SysAssert ( static_cast < WorkerId_t > ( tlsWorkerId ) = = id ) ;
}
2022-06-11 23:52:46 +00:00
// Returns the kernel work queue (async loop) of the calling worker.
// Bug fix: the previous implementation dereferenced GetThreadState() without
// a null check, crashing when called from a thread outside this pool
// (GetCurrentThreadRunMode performs the same guard).
AuSPtr<AuLoop::ILoopQueue> ThreadPool::ToKernelWorkQueue()
{
    auto pState = this->GetThreadState();
    if (!pState)
    {
        SysPushErrorUninitialized("Not an async thread");
        return {};
    }

    return pState->asyncLoop;
}
2022-06-11 23:52:46 +00:00
// Returns the kernel work queue (async loop) of the given worker, or null
// with a pushed error when the worker cannot be found.
AuSPtr<AuLoop::ILoopQueue> ThreadPool::ToKernelWorkQueue(WorkerId_t workerId)
{
    if (auto pWorker = this->GetThreadHandle(workerId))
    {
        return pWorker->asyncLoop;
    }

    SysPushErrorGeneric("Couldn't find requested worker");
    return {};
}
2021-11-05 17:34:23 +00:00
2022-03-10 15:35:01 +00:00
void ThreadPool : : UpdateWorkMode ( WorkerId_t workerId , RunMode mode )
{
auto states = this - > GetThreadHandles ( workerId ) ;
if ( ! states . size ( ) )
2021-11-05 17:34:23 +00:00
{
2023-01-30 13:26:17 +00:00
SysPushErrorGeneric ( " Couldn't find requested worker " ) ;
2022-03-10 15:35:01 +00:00
return ;
}
2021-11-05 17:34:23 +00:00
2022-03-10 15:35:01 +00:00
for ( const auto & state : states )
{
state - > runMode = mode . mode ;
if ( mode . freqMsTick )
2021-11-05 17:34:23 +00:00
{
2022-03-10 15:35:01 +00:00
state - > rateLimiter . SetNextStep ( mode . freqMsTick * 1'000'000 ) ;
2021-11-05 17:34:23 +00:00
}
2022-03-10 15:35:01 +00:00
}
}
2021-11-05 17:34:23 +00:00
2022-03-10 15:35:01 +00:00
// Reports the run mode of the calling worker; defaults to eEfficient when
// the caller is not a thread of this pool.
ERunMode ThreadPool::GetCurrentThreadRunMode()
{
    if (auto pState = this->GetThreadState())
    {
        return pState->runMode;
    }

    return ERunMode::eEfficient;
}
2021-11-05 17:34:23 +00:00
2022-03-10 15:35:01 +00:00
// Reports the run mode of the given worker; pushes an error and returns a
// default-constructed mode when the worker is unknown.
ERunMode ThreadPool::GetThreadRunMode(WorkerId_t workerId)
{
    auto pHandle = this->GetThreadHandle(workerId);
    if (!pHandle)
    {
        SysPushErrorGeneric("Couldn't find requested worker");
        return {};
    }

    return pHandle->runMode;
}
2023-08-13 08:30:17 +00:00
bool ThreadPool : : IsSelfDepleted ( )
{
auto queue = ToKernelWorkQueue ( ) ;
return ( ! queue | | queue - > GetSourceCount ( ) < = 1 + this - > uAtomicIOProcessorsWorthlessSources + this - > uAtomicIOProcessors ) ;
}
bool ThreadPool : : IsDepleted ( )
{
if ( ! IsSelfDepleted ( ) )
{
return false ;
}
for ( const auto & wOther : this - > listWeakDeps_ )
{
if ( auto pThat = AuTryLockMemoryType ( wOther ) )
{
if ( ! pThat - > IsSelfDepleted ( ) )
{
return false ;
}
if ( pThat - > uAtomicCounter )
{
return false ;
}
}
}
return true ;
}
void ThreadPool : : AddDependency ( AuSPtr < IThreadPool > pPool )
{
2023-08-17 10:46:25 +00:00
if ( ! pPool )
{
return ;
}
auto pOther = AuStaticCast < ThreadPool > ( pPool ) ;
this - > listWeakDeps_ . push_back ( pOther ) ;
pOther - > listWeakDepsParents_ . push_back ( AuSharedFromThis ( ) ) ;
2023-08-13 08:30:17 +00:00
}
2023-05-24 07:19:47 +00:00
// Exposes the embedded shutdown event via the shared_ptr aliasing
// constructor, so the returned waitable keeps the pool itself alive.
AuSPtr<AuThreading::IWaitable> ThreadPool::GetShutdownEvent()
{
    return { AuSharedFromThis(), this->shutdownEvent_.AsPointer() };
}
// Unimplemented fiber hooks, 'twas used for science. no longer in use
2021-11-05 17:34:23 +00:00
int ThreadPool : : CtxPollPush ( )
{
// TOOD (Reece): implement a context switching library
// Refer to the old implementation of this on pastebin
return 0 ;
}
void ThreadPool : : CtxPollReturn ( const AuSPtr < ThreadState > & state , int status , bool hitTask )
{
}
// Cooperatively pumps pending work on the calling worker.
// Returns true when at least one item was processed.
bool ThreadPool::CtxYield()
{
    bool ranAtLeastOne = false;

    // !!!
    if (this->shutdown ||
        this->shuttingdown_ & 2) // fast
    {
        // During (fast) shutdown a worker that is already rejecting work
        // must not pump its queue again.
        if (GetThreadState()->rejecting)
        {
            return false;
        }
    }

#if 0
    return this->InternalRunOne(false, uCount);
#else
    // Keep draining until a pass completes having processed zero items.
    AuUInt32 uCount {};
    do
    {
        uCount = 0;
        ranAtLeastOne |= this->InternalRunOne(false, uCount);
    }
    while (uCount);
    // uCount is always zero by the loop condition; result is ranAtLeastOne
    return uCount || ranAtLeastOne;
#endif
}
// internal api
// Registers a worker under workerId. When `create` is true the calling
// thread itself becomes the worker (in-place registration); otherwise a new
// OS thread is spawned whose entry point is ThreadPool::Entrypoint.
// Returns false/empty on duplicate ids or allocation failure.
bool ThreadPool::Spawn(WorkerId_t workerId, bool create)
{
    AU_LOCK_GUARD(rwlock_->AsWritable());

    if (create)
    {
        gCurrentPool = AuSharedFromThis();
    }

    AuSPtr<GroupState> pGroup;

    // Try fetch or allocate group
    {
        if (!(pGroup = threadGroups_[workerId.first]))
        {
            pGroup = AuMakeShared<GroupState>();
            if (!pGroup->Init())
            {
                SysPushErrorMemory("Not enough memory to intiialize a new group state");
                return false;
            }

            this->threadGroups_[workerId.first] = pGroup;
        }
    }

    // Assert worker does not already exist
    {
        AuSPtr<ThreadState> *ret;

        if (AuTryFind(pGroup->workers, workerId.second, ret))
        {
            SysPushErrorGeneric("Thread ID already exists");
            return false;
        }
    }

    auto threadState = AuMakeShared<ThreadState>();
    if (!threadState)
    {
        SysPushErrorMemory();
        return {};
    }

    threadState->parent = pGroup;
    threadState->id = workerId;
    threadState->asyncLoop = AuMakeShared<AsyncLoop>();
    if (!threadState->asyncLoop)
    {
        SysPushErrorMemory();
        return {};
    }

    // Event loop-source used to wake this worker's kernel wait.
    threadState->eventLs = AuLoop::NewLSAsync();
    if (!threadState->eventLs)
    {
        SysPushErrorMemory();
        return {};
    }
    threadState->asyncLoopSourceShared = threadState->eventLs;

    threadState->asyncLoop->pParent = threadState.get();
    threadState->rateLimiter.SetNextStep(1'000'000); // 1MS in nanoseconds
    threadState->runMode = ERunMode::eEfficient;
    if (!threadState->asyncLoop)
    {
        // NOTE(review): redundant — asyncLoop was already null-checked above.
        SysPushErrorMemory();
        return {};
    }

    if (!threadState->asyncLoop->Init())
    {
        SysPushErrorNested();
        return {};
    }

    threadState->asyncLoop->SourceAdd(threadState->eventLs);

    if (!create)
    {
        threadState->threadObject = AuThreads::ThreadShared(AuThreads::ThreadInfo(
            AuMakeShared<AuThreads::IThreadVectorsFunctional>(AuThreads::IThreadVectorsFunctional::OnEntry_t(std::bind(&ThreadPool::Entrypoint, this, threadState->id)),
                                                             AuThreads::IThreadVectorsFunctional::OnExit_t {}),
            gRuntimeConfig.async.threadPoolDefaultStackSize
        ));

        if (!threadState->threadObject)
        {
            return {};
        }

        threadState->threadObject->Run();
    }
    else
    {
        // Wrap the current thread with a no-op deleter: the pool does not
        // own a thread it did not spawn.
        threadState->threadObject = AuSPtr<AuThreads::IAuroraThread>(AuThreads::GetThread(), [](AuThreads::IAuroraThread *){});

        // TODO: this is just a hack
        // we should implement this properly
        threadState->threadObject->AddLastHopeTlsHook(AuMakeShared<AuThreads::IThreadFeatureFunctional>([]() -> void
        {
        }, []() -> void
        {
            // Last-hope TLS teardown: if the thread dies while still bound
            // to a pool, run the pool's exit path on its behalf.
            auto pid = GetCurrentWorkerPId();
            if (pid.pool)
            {
                GetWorkerInternal(pid.pool)->ThisExiting();
            }
        }));

        //
        gCurrentPool = AuWeakFromThis();
        tlsWorkerId = WorkerPId_t(AuSharedFromThis(), workerId);
    }

    pGroup->AddWorker(workerId.second, threadState);

    return true;
}
// private api
2023-02-08 18:23:37 +00:00
// Rendezvous with `workerId`: dispatches a runnable to the target worker that
// releases the CALLER's sync semaphore, then waits on that semaphore up to
// `ms`. `drop` marks the target as rejecting new work; `requireSignal` makes
// the target block on its own `running` event until externally resumed.
// Returns false on timeout, dispatch failure, or cancellation.
AU_NOINLINE bool ThreadPool::Barrier(WorkerId_t workerId, AuUInt32 ms, bool requireSignal, bool drop)
{
    auto self = GetThreadState();
    if (!self)
    {
        return {};
    }

    auto &semaphore = self->syncSema;
    // raw pointer captured by value below; the caller's ThreadState outlives
    // the dispatched runnable because we wait on the semaphore before returning
    auto unsafeSemaphore = semaphore.AsPointer();

    bool failed {};

    auto work = AuMakeShared<AsyncFuncRunnable>(
        [=]()
        {
            // runs on the TARGET worker
            auto state = GetThreadState();

            if (drop)
            {
                state->rejecting = true;
            }

            if (requireSignal)
            {
                state->running->Reset();
            }

            // release the caller blocked in WaitFor below
            unsafeSemaphore->Unlock(1);

            if (requireSignal)
            {
                // park until the caller (or another party) re-signals `running`
                state->running->Lock();
            }
        },
        [&]()
        {
            // cancellation path: still release the caller, but flag failure
            unsafeSemaphore->Unlock(1);
            failed = true;
        }
    );

    if (!work)
    {
        return false;
    }

    Run(workerId, work);

    return WaitFor(workerId, AuUnsafeRaiiToShared(semaphore.AsPointer()), ms) && !failed;
}
// Thread main for pool-spawned workers: binds TLS identity, runs the work
// loop until shutdown, then performs the exit handshake.
void ThreadPool::Entrypoint(WorkerId_t id)
{
    gCurrentPool = AuWeakFromThis();
    tlsWorkerId = WorkerPId_t(AuSharedFromThis(), id);

    auto job = GetThreadState();

    Run();

    if (id != WorkerId_t { 0, 0 })
    {
        AU_LOCK_GUARD(this->rwlock_->AsReadable());

        if (!this->shuttingdown_ && !job->rejecting)
        {
            // Pump and barrier + reject all after atomically
            Barrier(id, 0, false, true);
        }
    }

    ThisExiting();

    if (id == WorkerId_t { 0, 0 })
    {
        // worker {0,0} — presumably the pool's reserved/primary worker —
        // releases the shared zero-reservation state last (TODO: confirm)
        CleanWorkerPoolReservedZeroFree();
    }
}
2023-02-08 18:23:37 +00:00
void ThreadPool : : EarlyExitTick ( )
{
auto jobWorker = GetThreadState ( ) ;
auto state = jobWorker - > parent . lock ( ) ;
if ( ! jobWorker )
{
SysPushErrorUninitialized ( " Not an async thread " ) ;
return ;
}
if ( ( this - > shuttingdown_ & 2 ) ! = 2 )
{
return ;
}
2023-05-25 00:55:55 +00:00
state - > BroadCast ( ) ;
2023-02-08 18:23:37 +00:00
{
if ( AuExchange ( jobWorker - > bAlreadyDoingExitTick , true ) )
{
return ;
}
2023-03-05 12:55:07 +00:00
AuUInt32 uCount { } ;
do
{
uCount = 0 ;
this - > PollInternal ( false , uCount ) ;
}
while ( uCount ) ;
2023-02-08 18:23:37 +00:00
}
AuList < AuSPtr < AuThreads : : IThreadFeature > > features ;
{
2023-02-16 01:01:21 +00:00
AU_LOCK_GUARD ( this - > rwlock_ - > AsReadable ( ) ) ; // WARNING: this should be write, but im seeing a deadlock on exit
// there is no real race condition to be concerned about
// AsReadable shouldn't be enterable while someone is writing (the other accessor)
2023-02-08 18:23:37 +00:00
features = AuExchange ( jobWorker - > features , { } ) ;
}
{
for ( const auto & thread : features )
{
try
{
thread - > Cleanup ( ) ;
}
catch ( . . . )
{
SysPushErrorCatch ( " Couldn't clean up thread feature! " ) ;
}
}
2023-06-07 19:51:23 +00:00
jobWorker - > isDeadEvent - > Set ( ) ;
2023-02-08 18:23:37 +00:00
jobWorker - > bAlreadyDoingExitTick = false ;
jobWorker - > bBreakEarly = true ;
}
}
2021-11-05 17:34:23 +00:00
// Tears down the calling worker: signals death, cancels its scheduled tasks,
// releases anyone blocked on it, runs feature cleanup outside the lock, and
// finally unregisters the worker from its group under the write lock.
void ThreadPool::ThisExiting()
{
    auto id = GetCurrentThread();
    auto state = GetGroup(id.first);

    AuList<AuSPtr<AuThreads::IThreadFeature>> features;

    {
        //AU_LOCK_GUARD(this->rwlock_->AsWritable());
        AU_LOCK_GUARD(this->rwlock_->AsReadable());

        auto itr = state->workers.find(id.second);
        auto &jobWorker = itr->second;

        jobWorker->isDeadEvent->Set();

        CleanUpWorker(id);

        // Abort scheduled tasks
        TerminateSceduledTasks(this, id);

        // Prevent deadlocks
        jobWorker->syncSema->Unlock(10); // prevent ::Barrier dead-locks

        {
            // release external waiters fenced on this worker
            AU_LOCK_GUARD(jobWorker->externalFencesLock);
            jobWorker->exitingflag2 = true;

            for (const auto &pIWaitable : jobWorker->externalFences)
            {
                pIWaitable->Unlock();
            }

            jobWorker->externalFences.clear();
        }

        // steal the feature list so Cleanup() can run without holding rwlock_
        features = AuExchange(jobWorker->features, {});
    }

    {
        // Clean up thread features
        // -> transferable TLS handles
        // -> thread specific vms
        // -> anything your brain wishes to imagination
        for (const auto &thread : features)
        {
            try
            {
                thread->Cleanup();
            }
            catch (...)
            {
                AuLogWarn("Couldn't clean up thread feature!");
                Debug::PrintError();
            }
        }

        features.clear();
    }

    {
        // final unregistration requires the write lock
        AU_LOCK_GUARD(this->rwlock_->AsWritable());

        auto itr = state->workers.find(id.second);
        auto &jobWorker = itr->second;

        state->workers.erase(itr);
    }
}
// Fetches the group slot for this id; may be null if never spawned.
AuSPtr<GroupState> ThreadPool::GetGroup(ThreadGroup_t type)
{
    return this->threadGroups_[type];
}
// Resolves the calling thread's ThreadState within THIS pool, or null when
// the caller is not a worker of this pool (with a debug-only diagnostic).
AuSPtr<ThreadState> ThreadPool::GetThreadState()
{
    auto pPool = gCurrentPool.lock();
    if (!pPool)
    {
        return {};
    }

#if defined(AU_CFG_ID_INTERNAL) || defined(AU_CFG_ID_DEBUG)
    if (pPool.get() != this)
    {
        SysPushErrorGeneric("wrong thread");
        return {};
    }
#endif

    const auto workerId = *tlsWorkerId;

    auto pGroup = GetGroup(workerId.first);
    if (!pGroup)
    {
        return {};
    }

    return pGroup->GetThreadByIndex(workerId.second);
}
2023-02-08 18:23:37 +00:00
// Same as GetThreadState(), but silently returns null when the caller
// belongs to a different pool (no error is pushed in any build).
AuSPtr<ThreadState> ThreadPool::GetThreadStateNoWarn()
{
    auto pPool = gCurrentPool.lock();
    if (!pPool ||
        pPool.get() != this)
    {
        return {};
    }

    const auto workerId = *tlsWorkerId;

    auto pGroup = GetGroup(workerId.first);
    if (!pGroup)
    {
        return {};
    }

    return pGroup->GetThreadByIndex(workerId.second);
}
2021-11-05 17:34:23 +00:00
// Resolves a worker id to its ThreadState; null when the group or index is unknown.
AuSPtr<ThreadState> ThreadPool::GetThreadHandle(WorkerId_t id)
{
    auto pGroup = GetGroup(id.first);
    if (!pGroup)
    {
        return {};
    }

    return pGroup->GetThreadByIndex(id.second);
}
2022-03-10 15:35:01 +00:00
// Resolves a worker id to a list of ThreadStates: one entry for a specific
// id, or every worker of the group for kThreadIdAny. Empty when the group
// (or specific worker) does not exist.
AuList<AuSPtr<ThreadState>> ThreadPool::GetThreadHandles(WorkerId_t id)
{
    AU_LOCK_GUARD(this->rwlock_->AsReadable());

    auto pGroup = GetGroup(id.first);
    if (!pGroup)
    {
        return {};
    }

    AuList<AuSPtr<ThreadState>> handles;

    if (id.second == Async::kThreadIdAny)
    {
        for (const auto &[index, pState] : pGroup->workers)
        {
            handles.push_back(pState);
        }
    }
    else if (auto pState = pGroup->GetThreadByIndex(id.second))
    {
        handles.push_back(pState);
    }

    return handles;
}
2021-11-05 17:34:23 +00:00
// Factory for a fresh thread pool. Brings the scheduler online lazily so
// apps that never touch async don't pay for its spinner.
AUKN_SYM AuSPtr<IThreadPool> NewThreadPool()
{
    StartSched();
    return AuMakeShared<ThreadPool>();
}
2023-08-09 09:25:22 +00:00
struct KeepGroupAlive
{
KeepGroupAlive ( AuSPtr < AuAsync : : IThreadPool > pPool ) : pPool ( AuStaticCast < AuAsync : : ThreadPool > ( pPool ) )
{
AuAtomicAdd ( & this - > pPool - > uAtomicCounter , 1u ) ;
}
~ KeepGroupAlive ( )
{
auto uNow = AuAtomicSub ( & this - > pPool - > uAtomicCounter , 1u ) ;
if ( uNow = = 0 )
{
2023-08-10 00:31:10 +00:00
for ( const auto & pState : this - > pPool - > threadGroups_ )
2023-08-09 09:25:22 +00:00
{
2023-08-10 00:31:10 +00:00
if ( pState )
{
pState - > BroadCast ( ) ;
}
2023-08-09 09:25:22 +00:00
}
}
}
AuSPtr < AuAsync : : ThreadPool > pPool ;
} ;
// Returns an opaque RAII token pinning the pool's keep-alive counter until released.
AUKN_SYM AuSPtr<void> KeepThreadPoolAlive(AuSPtr<AuAsync::IThreadPool> pPool)
{
    return AuMakeSharedThrow<KeepGroupAlive>(pPool);
}
2021-11-05 17:34:23 +00:00
}