[+] AU_LOCK_GLOBAL_GUARD

[+] AuThreading::GetShutdownReadLock()
Reece Wilson 2024-09-09 02:47:38 +01:00
parent 16409d449b
commit 0571aa8dd4
25 changed files with 206 additions and 92 deletions

View File

@ -83,4 +83,5 @@ namespace Aurora::Threading
};
#define AU_LOCK_GUARD(variable) Aurora::Threading::LockGuard<decltype(variable)> AU_CONCAT(__stack_lock, __COUNTER__) (variable);
#define AU_LOCK_GLOBAL_GUARD(variable) AU_LOCK_GUARD(Aurora::Threading::GetShutdownReadLock()); AU_LOCK_GUARD(variable);
}
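Roughly, the new macro just stacks two scope guards: the process-wide shutdown read lock is taken first, then the caller's primitive. A minimal sketch of what AU_LOCK_GLOBAL_GUARD(gMyMutex) expands to, with gMyMutex as a placeholder and the generated variable names simplified (the real macro derives unique names via AU_CONCAT(__stack_lock, __COUNTER__)):

    Aurora::Threading::LockGuard<decltype(Aurora::Threading::GetShutdownReadLock())>
        __stack_lock0(Aurora::Threading::GetShutdownReadLock()); // shutdown read lock first
    Aurora::Threading::LockGuard<decltype(gMyMutex)>
        __stack_lock1(gMyMutex);                                 // then the caller's global primitive
    // Both guards release at the end of the enclosing scope, in reverse order.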

View File

@ -9,7 +9,28 @@
namespace Aurora::Threading
{
struct IWaitable;
/// Surrenders the current thread's time-slice to the kernel, on supporting platforms.
/// You shouldn't need nor desire this.
AUKN_SYM void ContextYield();
/// This is a special primitive that anybody can lock at any point, and should be locked before locking global primitives.
///
/// Intended use case: AU_LOCK_GLOBAL_GUARD(gMyGlobalResourceMutex) or AU_LOCK_GLOBAL_GUARD(gSingleton.mutex)
///
/// I'll let you in on a little secret: it's not special; it's just the read side of a read-write lock, with some special invocation logic on the write side.
/// One should always expect this *read lock* to pass without blocking.
/// Once a thread demands the forceful termination of another, the write side is acquired, and future *non-recursive read* acquisitions are rejected until the requested thread has terminated.
/// As previously stated, this is intended for protecting the lock guards of global primitives. Why? Global primitives cannot be as easily recycled.
/// You can always trash a memory heap / component db / etc. and reset. Forcefully shutting down a subsystem is viable, especially if its heaps are isolated and its IO resources are tracked under a watchdog.
/// On the other hand, it's quite difficult to wrangle forced thread shutdown, for two and a half reasons:
/// 1) POSIX APIs do not allow forced termination,
/// 1.5) clang is broken - it doesn't allow us to catch the ghetto gcc abi::__forced_unwind - not that other users can't clobber these to begin with, and
/// 2) global locks may never be released.
/// 1) isn't an issue for us.
/// 2) For short global read or write operations, GetShutdownLock can be held to ensure forced thread termination doesn't mess up future consumers of a global resource.
AUKN_SYM IWaitable *GetShutdownReadLock();
}
#include "IWaitable.hpp"
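As a quick usage sketch (gExampleMutex, gExampleItems, and ExampleInsert are illustrative names only, not runtime symbols): any short operation on a process-wide resource takes this shutdown read lock before its own primitive, which is exactly what AU_LOCK_GLOBAL_GUARD packs into one statement.

    // Hypothetical consumer of a global resource; the primitive type is assumed
    // to be anything AU_LOCK_GUARD already accepts.
    static AuThreadPrimitives::SpinLock gExampleMutex;
    static AuList<AuString> gExampleItems;
    void ExampleInsert(const AuString &value)
    {
        // Read-locks GetShutdownReadLock(), then locks gExampleMutex.
        // While a forceful thread termination is in flight, fresh non-recursive
        // read acquisitions are held off until that termination completes.
        AU_LOCK_GLOBAL_GUARD(gExampleMutex);
        AuTryInsert(gExampleItems, value);
    }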

View File

@ -286,7 +286,8 @@ namespace Aurora::Async
WorkerId_t target,
AuSPtr<IAsyncRunnable> runnable)
{
AU_LOCK_GUARD(gSchedLock);
AU_LOCK_GLOBAL_GUARD(gSchedLock);
AU_DEBUG_MEMCRUNCH;
if (pool)
{
@ -301,7 +302,7 @@ namespace Aurora::Async
void TerminateSceduledTasks(IThreadPoolInternal *pool,
WorkerId_t target)
{
AU_LOCK_GUARD(gSchedLock);
AU_LOCK_GLOBAL_GUARD(gSchedLock);
for (auto itr = gOrderedEntries.begin(); itr != gOrderedEntries.end(); )
{

View File

@ -128,14 +128,18 @@ namespace Aurora::Async
if (auto pPool = unlocker)
{
auto pPoolEx = AuStaticCast<ThreadPool>(unlocker.GetPool());
AU_LOCK_GUARD(pPoolEx->rwlock_->AsReadable());
AU_LOCK_GLOBAL_GUARD(pPoolEx->rwlock_->AsReadable());
auto pShutdownLock = Aurora::Threading::GetShutdownReadLock();
if ((pHandle = AuStaticCast<ThreadPool>(unlocker.GetPool())->GetThreadHandle(unlocker)))
{
AU_LOCK_GUARD(pHandle->externalFencesLock);
if (pHandle->exitingflag2)
{
return primitive->TryLock();
pShutdownLock->Unlock();
bool bRet = primitive->TryLock();
pShutdownLock->Lock();
return bRet;
}
else
{
@ -144,7 +148,10 @@ namespace Aurora::Async
}
else if (unlocker.GetPool().get() == this)
{
return primitive->LockMS(timeoutMs);
pShutdownLock->Unlock();
bool bRet = primitive->LockMS(timeoutMs);
pShutdownLock->Lock();
return bRet;
}
}
@ -152,7 +159,7 @@ namespace Aurora::Async
if (pHandle)
{
AU_LOCK_GUARD(pHandle->externalFencesLock);
AU_LOCK_GLOBAL_GUARD(pHandle->externalFencesLock);
AuTryRemove(pHandle->externalFences, primitive.get());
}
@ -458,7 +465,7 @@ namespace Aurora::Async
if (!block &&
!(this->shuttingdown_ & 2)) // quick hack: is worthy of io reset by virtue of having polled externally (most likely for IO ticks, unlikely for intraprocess ticks)
{
AU_LOCK_GUARD(group->workQueue.mutex); // dont atomically increment our work counters [signal under mutex group]...
AU_LOCK_GLOBAL_GUARD(group->workQueue.mutex); // dont atomically increment our work counters [signal under mutex group]...
AU_LOCK_GUARD(group->workersMutex); // dont atomically increment our work counters [broadcast]...
// ...these primitives are far less expensive to hit than resetting kernel primitives
// AU_LOCK_GUARD(state->cvWorkMutex) used to protect us
@ -597,7 +604,7 @@ namespace Aurora::Async
continue;
}
AU_LOCK_GUARD(pGroup->workersMutex);
AU_LOCK_GLOBAL_GUARD(pGroup->workersMutex);
for (auto &[id, worker] : pGroup->workers)
{
if (trySelfPid == worker->thread.id)
@ -644,7 +651,7 @@ namespace Aurora::Async
AuList<AuThreads::ThreadShared_t> threads;
AuList<AuSPtr<ThreadState>> states;
{
AU_LOCK_GUARD(this->pRWReadView);
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
for (auto pGroup : this->threadGroups_)
{
@ -898,14 +905,23 @@ namespace Aurora::Async
bool ThreadPool::Sync(WorkerId_t workerId, AuUInt32 timeoutMs, bool requireSignal)
{
AU_LOCK_GUARD(this->pRWReadView);
//AU_LOCK_GUARD(this->pRWReadView);
auto group = GetGroup(workerId.first);
auto currentWorkerId = GetCurrentThread().second;
if (workerId.second == Async::kThreadIdAny)
{
for (auto &jobWorker : group->workers)
decltype(GroupState::workers) workers;
{
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
if (auto pGroup = GetGroup(workerId.first))
{
workers = pGroup->workers;
}
}
for (auto &jobWorker : workers)
{
if (!Barrier(jobWorker.second->thread.id, timeoutMs, requireSignal && jobWorker.second->thread.id.second != currentWorkerId, false)) // BAD!, should subtract time elapsed, clamp to, i dunno, 5ms min?
{
@ -926,7 +942,7 @@ namespace Aurora::Async
auto group = GetGroup(workerId.first);
if (workerId.second == Async::kThreadIdAny)
{
AU_LOCK_GUARD(group->workersMutex);
AU_LOCK_GLOBAL_GUARD(group->workersMutex);
for (auto &jobWorker : group->workers)
{
jobWorker.second->running->Set();
@ -964,7 +980,7 @@ namespace Aurora::Async
void ThreadPool::SyncAllSafe()
{
AU_LOCK_GUARD(this->pRWReadView);
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
for (auto pGroup : this->threadGroups_)
{
@ -1150,7 +1166,7 @@ namespace Aurora::Async
if (workerId.second == kThreadIdAny)
{
AU_LOCK_GUARD(group->workersMutex);
AU_LOCK_GLOBAL_GUARD(group->workersMutex);
for (auto &[jobWorker, pState]: group->workers)
{
@ -1208,7 +1224,7 @@ namespace Aurora::Async
bool ThreadPool::Spawn(WorkerId_t workerId, bool create)
{
AU_LOCK_GUARD(this->rwlock_->AsWritable());
AU_LOCK_GLOBAL_GUARD(this->rwlock_->AsWritable());
if (create)
{
@ -1354,7 +1370,7 @@ namespace Aurora::Async
void ThreadPool::Entrypoint(WorkerId_t id)
{
{
AU_LOCK_GUARD(this->pRWReadView);
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
}
tlsCurrentThreadPool = AuWeakFromThis();
@ -1366,7 +1382,7 @@ namespace Aurora::Async
if (id != WorkerId_t {0, 0})
{
AU_LOCK_GUARD(this->pRWReadView);
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
if (!AuAtomicLoad(&this->shuttingdown_) && !job->shutdown.bDropSubmissions)
{
@ -1452,7 +1468,7 @@ namespace Aurora::Async
AuList<AuSPtr<AuThreads::IThreadFeature>> features;
{
AU_LOCK_GUARD(this->pRWReadView);
AU_LOCK_GLOBAL_GUARD(this->pRWReadView);
pLocalState->isDeadEvent->Set();
@ -1607,7 +1623,8 @@ namespace Aurora::Async
}
else
{
AU_LOCK_GUARD(group->workersMutex);
AU_LOCK_GLOBAL_GUARD(group->workersMutex);
for (const auto &[key, value] : group->workers)
{
ret.push_back(value);
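The Barrier path above also shows the one wrinkle with the new guard: a potentially long wait (primitive->TryLock under contention, primitive->LockMS) must not happen while holding the shutdown read lock, otherwise a pending forceful termination would stall behind it. The diff therefore drops the read lock around the wait and re-takes it before the scope guard unwinds; the same reasoning drives Sync() snapshotting the worker list under the guard and iterating outside it. A condensed sketch of that shape, with gSomeGlobalMutex and pSlowPrimitive as placeholders and assuming the primitive exposes LockMS like the waitables above:

    bool ExampleWaitWithoutPinningShutdown(const AuSPtr<Aurora::Threading::IWaitable> &pSlowPrimitive,
                                           AuUInt32 uTimeoutMs)
    {
        AU_LOCK_GLOBAL_GUARD(gSomeGlobalMutex);          // shutdown read lock + gSomeGlobalMutex
        auto pShutdownLock = Aurora::Threading::GetShutdownReadLock();
        pShutdownLock->Unlock();                         // don't pin the shutdown lock across a long sleep
        bool bRet = pSlowPrimitive->LockMS(uTimeoutMs);  // potentially blocks for a while
        pShutdownLock->Lock();                           // re-take so the scope guard stays balanced
        return bRet;
    }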

View File

@ -52,7 +52,7 @@ namespace Aurora::Console::Commands
// TODO: try catch?
{
AU_LOCK_GUARD(gPendingCommandsMutex);
AU_LOCK_GLOBAL_GUARD(gPendingCommandsMutex);
{
AuString tag;
@ -117,13 +117,13 @@ namespace Aurora::Console::Commands
AUKN_SYM void RemoveCommand(const AuROString &tag)
{
AU_LOCK_GUARD(gPendingCommandsMutex);
AU_LOCK_GLOBAL_GUARD(gPendingCommandsMutex);
AuTryRemove(gCommands, tag);
}
AUKN_SYM void AddCommand(const AuROString &tag, const Parse::ParseObject &commandStructure, const AuSPtr<ICommandSubscriber> &callback)
{
AU_LOCK_GUARD(gPendingCommandsMutex);
AU_LOCK_GLOBAL_GUARD(gPendingCommandsMutex);
SysAssert(callback);
gCommands.insert(AuMakePair(AuString(tag), Command(AuString(tag), commandStructure, callback)));
}
@ -145,7 +145,7 @@ namespace Aurora::Console::Commands
void UpdateDispatcher(AuOptionalEx<AuWorkerPId_t> target)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
AU_LOCK_GUARD(gPendingCommandsMutex);
// process commands before async app termination
@ -183,7 +183,7 @@ namespace Aurora::Console::Commands
void PumpCommands()
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
AuList<CommandDispatch> commands;
{

View File

@ -192,7 +192,7 @@ namespace Aurora::Debug
AuUInt32 ReportStackTrace(const StackTrace& trace, const AuString& message)
{
AU_LOCK_GUARD(gLock);
AU_LOCK_GLOBAL_GUARD(gLock);
tlsLastStackTrace = trace;
tlsLastExceptionMessage = message;
tlsLastBackTrace = gStackTraceFence++;
@ -212,7 +212,7 @@ namespace Aurora::Debug
AUKN_SYM StackTrace GetLastStackTrace()
{
AU_LOCK_GUARD(gLock);
AU_LOCK_GLOBAL_GUARD(gLock);
return tlsLastStackTrace;
}
@ -227,7 +227,7 @@ namespace Aurora::Debug
AUKN_SYM AuString GetLastException()
{
AU_LOCK_GUARD(gLock);
AU_LOCK_GLOBAL_GUARD(gLock);
return tlsLastExceptionMessage;
}

View File

@ -71,7 +71,7 @@ namespace Aurora::Exit
bool bOldTerminatingValue;
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
bool isTerminate = level == ETriggerLevel::eSafeTermination;
bool isPreempting {};
@ -175,13 +175,13 @@ namespace Aurora::Exit
{
return false;
}
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
return AuTryInsert(gTriggerSubscribers, AuMakePair(callback, level));
}
AUKN_SYM void ExitHandlerRemove(const AuSPtr<IExitSubscriber> &callback)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
AuRemoveAllIf(gTriggerSubscribers, [=](const auto &entry) -> bool
{

View File

@ -184,7 +184,7 @@ namespace Aurora::Grug
}
{
AU_LOCK_GUARD(gOtherMutex);
AU_LOCK_GLOBAL_GUARD(gOtherMutex);
toClose = AuMove(gHandlesToClose);
toTrigger = AuMove(gEventsToTrigger);
}
@ -232,7 +232,7 @@ namespace Aurora::Grug
{
{
AU_DEBUG_MEMCRUNCH;
AU_LOCK_GUARD(gOtherMutex);
AU_LOCK_GLOBAL_GUARD(gOtherMutex);
gHandlesToClose.push_back(AuMakeTuple(AuUInt(handle), bFlush, bWriteEoS));
}
@ -251,7 +251,7 @@ namespace Aurora::Grug
{
AU_DEBUG_MEMCRUNCH;
AU_LOCK_GUARD(gOtherMutex);
AU_LOCK_GLOBAL_GUARD(gOtherMutex);
gEventsToTrigger.push_back(event.AsPointer());
}

View File

@ -33,7 +33,7 @@ namespace Aurora::IO::Async
AUKN_SYM void UseSpecifiedWorkerGroup(AuAsync::WorkerPId_t worker)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
gDefaultGroup = worker;
bOwnsThreadPool = false;
}
@ -45,7 +45,7 @@ namespace Aurora::IO::Async
AUKN_SYM AuUInt32 SpawnMoreThreads(AuUInt32 uRequest)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
AuUInt32 ret {};
AuUInt32 uStartOffset {};
@ -143,7 +143,7 @@ namespace Aurora::IO::Async
AuAsync::WorkerPId_t GetAuxWorkerPoolAndRegister()
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
if (auto worker = GetAuxWorkerPool())
{

View File

@ -1046,7 +1046,7 @@ namespace Aurora::IO::FS
AuList<FSDevice> SysGetFSDevices()
{
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
AuList<FSDevice> devices;
#if defined(AURORA_PLATFORM_WIN32)

View File

@ -26,7 +26,7 @@ namespace Aurora::IO::FS
{
InitPlatformFSCacheAtLoad();
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
for (const auto &refFSDevice : gCachedDevices)
{
if (refFSDevice.devicePath == physicalDevicePath)
@ -52,7 +52,7 @@ namespace Aurora::IO::FS
return {};
}
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
for (const auto &refFSDevice : gCachedDevices)
{
@ -83,7 +83,7 @@ namespace Aurora::IO::FS
AUKN_SYM AuList<FSDevice> GetFSDevices()
{
InitPlatformFSCacheAtLoad();
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
return gCachedDevices;
}
@ -91,7 +91,7 @@ namespace Aurora::IO::FS
{
InitPlatformFSCacheAtLoad();
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
for (const auto &refFSDevice : gCachedDevices)
{
if (refFSDevice.devicePath == physicalDevicePath)
@ -114,7 +114,7 @@ namespace Aurora::IO::FS
AUKN_SYM void ResetDeviceCache()
{
AU_LOCK_GUARD(gFSDirMutex);
AU_LOCK_GLOBAL_GUARD(gFSDirMutex);
gCachedDevices = AuMove(SysGetFSDevices());
}
}

View File

@ -27,7 +27,7 @@ namespace Aurora::IO::NT
HANDLE GetHandleForToken(AuUInt32 uToken)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
auto itr = gHANDLECookieMap.find(uToken);
if (itr != gHANDLECookieMap.end())
@ -211,7 +211,7 @@ namespace Aurora::IO::NT
}
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
do
{
@ -230,7 +230,7 @@ namespace Aurora::IO::NT
void FDServeEnd(const IPC::IPCToken &handle)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
AuTryRemove(gHANDLECookieMap, handle.cookie);
}

View File

@ -38,7 +38,7 @@ namespace Aurora::Logging
{
AuAtomicSub(&gAtomicInUseCount, 1u);
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
if (!LogClassInUse(uIdx + AuLog::kLogLevelUsr))
{
@ -49,7 +49,7 @@ namespace Aurora::Logging
static bool SetString(AuUInt8 uIdx, const AuString &str)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
return bool(AuTryConstruct(gStringMap[uIdx], str));
}
@ -100,7 +100,7 @@ namespace Aurora::Logging
AUKN_SYM AuString LogClassGetNameSafe(AuUInt8 uIndex)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
uIndex -= AuLog::kLogLevelUsr;

View File

@ -23,7 +23,7 @@ namespace Aurora::Logging
{
AuMemset(shouldFilter, 0, sizeof(shouldFilter));
{
AU_LOCK_GUARD(gGlobalSpin);
AU_LOCK_GLOBAL_GUARD(gGlobalSpin);
SysAssert(AuTryInsert(gFlushableLoggers, this));
}
}
@ -95,7 +95,7 @@ namespace Aurora::Logging
void Logger::AddToPushQueueConst(AuUInt8 uLevel, const ConsoleMessage &msg)
{
{
AU_LOCK_GUARD(gTaskSpin);
AU_LOCK_GLOBAL_GUARD(gTaskSpin);
auto nice = gLogTasks.size() + 1;
if (gLogTasks.capacity() < nice)
@ -137,7 +137,7 @@ namespace Aurora::Logging
void Logger::PopFilter()
{
AU_LOCK_GUARD(this->spin);
AU_LOCK_GLOBAL_GUARD(this->spin);
this->filters.pop_back();
}
@ -145,10 +145,10 @@ namespace Aurora::Logging
{
AU_DEBUG_MEMCRUNCH;
decltype(gLogTasks) logTasks;
AU_LOCK_GUARD(gGlobalSpin);
AU_LOCK_GLOBAL_GUARD(gGlobalSpin);
{
AU_LOCK_GUARD(gTaskSpin);
AU_LOCK_GLOBAL_GUARD(gTaskSpin);
AuSwap(logTasks, gLogTasks);
gLogTasks.reserve(12 * 1024);
}
@ -238,20 +238,20 @@ namespace Aurora::Logging
void ForceFlushFlush()
{
AU_LOCK_GUARD(gGlobalSpin);
AU_LOCK_GLOBAL_GUARD(gGlobalSpin);
ForceFlushFlushNoLock();
}
void Logger::Disable()
{
AU_LOCK_GUARD(gGlobalSpin);
AU_LOCK_GLOBAL_GUARD(gGlobalSpin);
ForceFlushLoggersNoLock();
ForceFlushLogger(this);
{
AU_LOCK_GUARD(spin);
AU_LOCK_GLOBAL_GUARD(spin);
AuMemset(shouldFilter, 1, sizeof(shouldFilter));
}

View File

@ -468,7 +468,7 @@ namespace Aurora::Process
{
case EModulePath::eClassPath:
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
arrayPaths = gClassPath;
break;
}
@ -592,7 +592,7 @@ namespace Aurora::Process
AUKN_SYM void *LoadModuleEx(const ModuleLoadRequest &request)
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
auto h = gModuleHandles.find(request.mod);
if (h != gModuleHandles.end())
@ -664,7 +664,7 @@ namespace Aurora::Process
if (!ret)
#endif
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
for (const auto &[string, hHandle] : gModuleHandles)
{
@ -689,7 +689,7 @@ namespace Aurora::Process
if (!ret)
#endif
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
for (const auto &[string, hHandle] : gModuleHandles)
{
@ -708,7 +708,7 @@ namespace Aurora::Process
AUKN_SYM void *GetProcHandle(const AuString &name)
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
auto h = gModuleHandles.find(name);
if (h == gModuleHandles.end())
@ -904,7 +904,7 @@ namespace Aurora::Process
AUKN_SYM bool SetBinaryClassPath(const AuList<AuString> &list, bool preloadAll)
{
AU_DEBUG_MEMCRUNCH;
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
AuExchange(gClassPath, list);
if (((preloadAll && gRuntimeConfig.processConfig.bEnablePreload)) ||
@ -925,7 +925,7 @@ namespace Aurora::Process
AUKN_SYM bool AddBinaryClassPath(const AuString &dir, bool preloadAll)
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
if (!AuTryInsert(gClassPath, dir))
{
@ -943,7 +943,7 @@ namespace Aurora::Process
AUKN_SYM AuList<AuString> GetBinaryClassPath()
{
AU_LOCK_GUARD(gSpinLock);
AU_LOCK_GLOBAL_GUARD(gSpinLock);
return gClassPath;
}

View File

@ -26,9 +26,8 @@ namespace Aurora::Process
return {};
}
if (gMutex)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
if (path)
{
auto itr = gPathCache.find(reinterpret_cast<AuUInt>(handle));
@ -65,9 +64,8 @@ namespace Aurora::Process
auto ret = Locale::ConvertFromWChar(file.c_str(), file.length());
if (gMutex)
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
if (path)
{
AuTryInsert(gPathCache, AuMakePair(reinterpret_cast<AuUInt>(handle), ret));
@ -186,16 +184,11 @@ namespace Aurora::Process
void InvaildateModule(HMODULE hmod)
{
if (!gMutex)
{
return;
}
try
{
// acquire gmutex
{
AU_LOCK_GUARD(gMutex);
AU_LOCK_GLOBAL_GUARD(gMutex);
auto itr1 = gPathCache.find(reinterpret_cast<AuUInt>(hmod));
if (itr1 != gPathCache.end()) gPathCache.erase(itr1);

View File

@ -77,7 +77,7 @@ namespace Aurora::Process
static AuSPtr<PublicModule> GetModuleFromSectionCache(AuUInt pointer)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
auto itr = gModulePtrMap.find(pointer);
if (itr == gModulePtrMap.end()) return {};
return itr->second.moduleMeta.lock();
@ -85,7 +85,7 @@ namespace Aurora::Process
static AuOptional<Section> GetSectionCache(AuUInt pointer)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
auto itr = gModulePtrMap.find(pointer);
if (itr == gModulePtrMap.end()) return {};
return itr->second;
@ -93,7 +93,7 @@ namespace Aurora::Process
bool IsInModuleCache(const ModuleBasePair &pair)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
return gModuleMap.find(pair) != gModuleMap.end();
}
@ -101,7 +101,7 @@ namespace Aurora::Process
{
for (auto &section : mod->sections)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
section.moduleMeta = mod;
if (!section.baseVa)
@ -117,14 +117,14 @@ namespace Aurora::Process
}
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
gModuleMap[pair] = mod;
}
}
void RemoveModuleCache(const ModuleBasePair &eitherOr)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
auto itr = gModuleMap.find(eitherOr);
if (itr == gModuleMap.end()) return;
auto mod = itr->second;
@ -151,7 +151,7 @@ namespace Aurora::Process
PublicModule GetFromModuleCache(AuUInt handle)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
auto itr = gModuleMap.find({"", handle});
if (itr == gModuleMap.end()) return {};
return *itr->second;
@ -211,7 +211,7 @@ namespace Aurora::Process
void BorrowOtherSectionArray(const AuConsumer<AuList<Section>&> &callback)
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
callback(gOtherSections);
}
@ -261,7 +261,7 @@ namespace Aurora::Process
AUKN_SYM Sections DumpExecutableAll()
{
AU_LOCK_GUARD(gMutexUnique);
AU_LOCK_GLOBAL_GUARD(gMutexUnique);
try
{
Sections ret;

View File

@ -134,7 +134,7 @@ namespace Aurora::Processes
AUKN_SYM void OpenUri(const AuROString &uri)
{
AU_LOCK_GUARD(gCondMutex);
AU_LOCK_GLOBAL_GUARD(gCondMutex);
AuTryInsert(gOpenItems, AuMakePair(AuString(uri), true));
gCondVariable->Signal();
}
@ -150,7 +150,7 @@ namespace Aurora::Processes
}
{
AU_LOCK_GUARD(gCondMutex);
AU_LOCK_GLOBAL_GUARD(gCondMutex);
AuTryInsert(gOpenItems, AuMove(AuMakePair(AuMove(path), false)));
gCondVariable->Signal();
}

View File

@ -0,0 +1,50 @@
/***
Copyright (C) Jamie Reece Wilson (a/k/a "Reece"). All rights reserved.
File: AuStopTheWorld.cpp
Date: 2024-09-09
Author: Reece
***/
#include <Source/RuntimeInternal.hpp>
#include "AuStopTheWorld.hpp"
#include "AuWakeOnAddress.hpp"
#include "Primitives/SMTYield.hpp"
namespace Aurora::Threading
{
static AuThreadPrimitives::RWLock gHolderRWLock;
AUKN_SYM IWaitable *GetShutdownReadLock()
{
return gHolderRWLock->AsReadable();
}
void WakeOnAddressHoldContainersAndTheWorld(const AuVoidFunc &func)
{
// We * must * allow ::GetShutdownLock() to be recursive, no matter how the runtime is configured
Primitives::ThrdCfg::gEnableRWLockWriteBiasOnReadLock = false;
{
// First: no-one shall pass anymore
AU_LOCK_GUARD(gHolderRWLock->AsWritable());
// Second: wait for any sleep ops to finish
for (AU_ITERATE_N(i, kDefaultWaitPerProcess))
{
gProcessWaitables.list[i].Lock();
}
{
func();
}
// Finally: reset
for (AU_ITERATE_N(i, kDefaultWaitPerProcess))
{
gProcessWaitables.list[i].Unlock();
}
}
Primitives::ThrdCfg::gEnableRWLockWriteBiasOnReadLock = gRuntimeConfig.threadingConfig.bEnableRWLockWriteBiasOnReadLock;
}
}
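This helper is consumed later in the commit by OSThread::TeminateOSContext; any other code that needs a window in which no new shutdown read locks can be granted and no WakeOnAddress sleeper is mid-operation could use the same shape (DoForcefulCleanup is a placeholder):

    // Hypothetical caller; DoForcefulCleanup stands in for work that must not race
    // against new shutdown read locks or in-flight wait-on-address containers.
    void ExampleStopTheWorldAndClean()
    {
        Aurora::Threading::WakeOnAddressHoldContainersAndTheWorld([&]()
        {
            // Here: the write side of the shutdown RW lock is held, so fresh
            // non-recursive AU_LOCK_GLOBAL_GUARD read acquisitions are held off,
            // and every per-process wait container is locked.
            DoForcefulCleanup();
        });
    }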

View File

@ -0,0 +1,13 @@
/***
Copyright (C) Jamie Reece Wilson (a/k/a "Reece"). All rights reserved.
File: AuStopTheWorld.hpp
Date: 2024-09-09
Author: Reece
***/
#pragma once
namespace Aurora::Threading
{
void WakeOnAddressHoldContainersAndTheWorld(const AuVoidFunc &func);
}

View File

@ -50,7 +50,6 @@ namespace Aurora::Threading
break; \
}
static ProcessWaitContainer gProcessWaitables;
static const int gShouldSpinOnlyInCPU = 1; // TODO: havent decided
// UPDATE: 1 paranoia just in case we get preempted (rare).
template<typename T>

View File

@ -143,4 +143,6 @@ namespace Aurora::Threading
void RemoveSelf(const void *pAddress, WaitEntry *pSelf);
};
inline ProcessWaitContainer gProcessWaitables;
}

View File

@ -11,6 +11,7 @@
#include "AuOSThread.hpp"
#include "AuThreadHandles.hpp"
#include "TLSView.hpp"
#include <Source/Threading/AuStopTheWorld.hpp>
#if defined(AURORA_IS_LINUX_DERIVED)
#include <sys/resource.h>
@ -1522,6 +1523,21 @@ namespace Aurora::Threading::Threads
}
void OSThread::TeminateOSContext(bool calledFromThis)
{
if (calledFromThis)
{
this->TeminateOSContext2(calledFromThis);
}
else
{
WakeOnAddressHoldContainersAndTheWorld([&]()
{
this->TeminateOSContext2(calledFromThis);
});
}
}
void OSThread::TeminateOSContext2(bool calledFromThis)
{
#if defined(AURORA_IS_MODERNNT_DERIVED)

View File

@ -75,6 +75,7 @@ namespace Aurora::Threading::Threads
void OSAttach();
void OSDeatach();
void TeminateOSContext(bool calledFromThis);
void TeminateOSContext2(bool calledFromThis);
void FreeOSContext();
bool HasValidThreadIdYet();
bool HasValidThreadHandleYet();

View File

@ -63,7 +63,7 @@ namespace Aurora::Threading::Threads
{
return;
}
AU_LOCK_GUARD(gVariables.gMutex);
AU_LOCK_GLOBAL_GUARD(gVariables.gMutex);
#if defined(AURORA_IS_POSIX_DERIVED)
AuTryInsert(gVariables.gUnixHandlesToThreads, id.unixThreadId, handle);
#endif
@ -76,7 +76,7 @@ namespace Aurora::Threading::Threads
{
return;
}
AU_LOCK_GUARD(gVariables.gMutex);
AU_LOCK_GLOBAL_GUARD(gVariables.gMutex);
#if defined(AURORA_IS_POSIX_DERIVED)
AuTryRemove(gVariables.gUnixHandlesToThreads, id.unixThreadId);
#endif
@ -155,7 +155,7 @@ namespace Aurora::Threading::Threads
{
return {};
}
AU_LOCK_GUARD(gVariables.gMutex);
AU_LOCK_GLOBAL_GUARD(gVariables.gMutex);
auto itr = gVariables.gUnixHandlesToThreads.find(validPid);
if (itr == gVariables.gUnixHandlesToThreads.end()) return {};
return itr->second;
@ -167,7 +167,7 @@ namespace Aurora::Threading::Threads
{
return {};
}
AU_LOCK_GUARD(gVariables.gMutex);
AU_LOCK_GLOBAL_GUARD(gVariables.gMutex);
auto itr = gVariables.gNativeHandlesToThreads.find(validPid);
if (itr == gVariables.gNativeHandlesToThreads.end()) return {};
return itr->second;