[*] Fix C++17 build regressions

This commit is contained in:
Reece Wilson 2024-09-27 22:50:04 +01:00
parent f059141505
commit ff6409859f
10 changed files with 160 additions and 84 deletions

View File

@ -365,12 +365,14 @@ private:
AuFuture()
{
this->pid = AuAsync::GetCurrentWorkerPId();
this->uFlags = 0;
}
AuFuture(AuConsumer<Move_t> callback) :
callback(callback)
{
this->pid = AuAsync::GetCurrentWorkerPId();
this->uFlags = 0;
}
AuFuture(AuConsumer<Move_t> callback, ErrorCallback_f onFailure) :
@ -378,6 +380,7 @@ private:
onFailure(onFailure)
{
this->pid = AuAsync::GetCurrentWorkerPId();
this->uFlags = 0;
}
typename CppFun<T>::B value;
@ -392,10 +395,17 @@ private:
AuList<AuConsumer<bool, bool>> waterfall;
volatile void *pWorkItem {};
AuAUInt8 bComplete : 1 {};
AuAUInt8 bFailed : 1 {};
AuAUInt8 bDone : 1 {};
AuAUInt8 bDoneCb : 1 {};
union
{
struct
{
AuAUInt8 bComplete : 1;
AuAUInt8 bFailed : 1;
AuAUInt8 bDone : 1;
AuAUInt8 bDoneCb : 1;
};
AuUInt8 uFlags {};
};
friend struct AuWaterfall;
};
@ -640,10 +650,17 @@ private:
bool bDone {};
bool bFailed {};
#else
AuUInt8 bFailOnAny : 1{};
AuUInt8 bReady : 1 {};
AuUInt8 bDone : 1 {};
AuUInt8 bFailed : 1 {};
union
{
struct
{
AuUInt8 bFailOnAny : 1;
AuUInt8 bReady : 1;
AuUInt8 bDone : 1;
AuUInt8 bFailed : 1;
};
AuUInt8 uFlags {};
};
#endif
};
@ -679,13 +696,24 @@ namespace __detail
#endif
#endif
#if defined(AU_HasCoRoutinedIncluded)
#if defined(AU_LANG_CPP_17) && defined(AURORA_COMPILER_MSVC)
#if !defined(AU_HasVoidCoRoutineTraitsAvailable)
#define AU_HasVoidCoRoutineTraitsAvailable
#endif
#endif
#if defined(AU_NO_COROUTINES) || ((defined(AURORA_COMPILER_MSVC) && defined(AU_LANG_CPP_14)) || (defined(AURORA_COMPILER_CLANG) && defined(AU_LANG_CPP_14)) || (defined(AURORA_COMPILER_CLANG) && defined(AU_LANG_CPP_17)) || (defined(AURORA_COMPILER_MSVC) && !defined(_RESUMABLE_FUNCTIONS_SUPPORTED)))
#elif defined(AU_HasCoRoutinedIncluded)
#define __AUHAS_COROUTINES_CO_AWAIT
#else
#if !defined(AU_HasCoRoutinedNoIncludeIfAvailable)
#if defined(_RESUMABLE_FUNCTIONS_SUPPORTED) && defined(AU_LANG_CPP_17)
#include <experimental/coroutine>
#define AU_HasCoRoutineTraitsAvailable
#else
#include <coroutine>
#endif
#define __AUHAS_COROUTINES_CO_AWAIT
#endif
@ -695,6 +723,14 @@ namespace std
{
#if !defined(AU_HasVoidCoRoutineTraitsAvailable)
#if !defined(AU_HasCoRoutineTraitsAvailable)
template<typename ...T>
struct coroutine_traits
{
};
#endif
template<>
struct coroutine_traits<void>
{
@ -706,12 +742,12 @@ namespace std
void set_exception(exception_ptr const &) noexcept
{ }
std::suspend_always initial_suspend() noexcept
AU_CO_ROUTINE_SUS_ALWAYS initial_suspend() noexcept
{
return {};
}
std::suspend_always final_suspend() noexcept
AU_CO_ROUTINE_SUS_ALWAYS final_suspend() noexcept
{
return {};
}
@ -735,12 +771,12 @@ namespace std
void set_exception(exception_ptr const &) noexcept
{ }
std::suspend_always initial_suspend() noexcept
AU_CO_ROUTINE_SUS_ALWAYS initial_suspend() noexcept
{
return {};
}
std::suspend_always final_suspend() noexcept
AU_CO_ROUTINE_SUS_ALWAYS final_suspend() noexcept
{
return {};
}
@ -752,7 +788,6 @@ namespace std
{ }
};
};
#endif
}
@ -765,12 +800,12 @@ struct AuVoidTask
return {};
}
std::suspend_never initial_suspend()
AU_CO_ROUTINE_SUS_ALWAYS initial_suspend()
{
return {};
}
std::suspend_never final_suspend() noexcept
AU_CO_ROUTINE_SUS_ALWAYS final_suspend() noexcept
{
return {};
}

View File

@ -80,14 +80,14 @@ namespace Aurora::Memory
// Stable ByteBuffer ABI Header; u8 flags //
///////////////////////////////////////////////////////////////////////
AuUInt8 flagCircular : 1 {}; /// Is ring buffer?
AuUInt8 flagExpandable : 1 {}; /// Should resize linear buffer to accommodate additional writes
AuUInt8 flagReadError : 1 {}; /// Has error? Has read error?
AuUInt8 flagWriteError : 1 {}; /// Has error? Has write error?
AuUInt8 flagNoFree : 1 {}; /// Prevents all free operations
AuUInt8 flagNoRealloc : 1 {}; /// Prevents a subset of free options, specifically realloc, operations
AuUInt8 flagAlwaysExpandable : 1 {}; /// Internal flag. Do not use.
AuUInt8 flagReserveA : 1 {}; /// Placeholder
AuUInt8 flagCircular : 1; /// Is ring buffer?
AuUInt8 flagExpandable : 1; /// Should resize linear buffer to accommodate additional writes
AuUInt8 flagReadError : 1; /// Has error? Has read error?
AuUInt8 flagWriteError : 1; /// Has error? Has write error?
AuUInt8 flagNoFree : 1; /// Prevents all free operations
AuUInt8 flagNoRealloc : 1; /// Prevents a subset of free options, specifically realloc, operations
AuUInt8 flagAlwaysExpandable : 1; /// Internal flag. Do not use.
AuUInt8 flagReserveA : 1; /// Placeholder
///////////////////////////////////////////////////////////////////////
// Special flags/values

View File

@ -195,7 +195,9 @@ namespace Aurora::Memory
return !length || !base;
}
ByteBuffer::ByteBuffer(ByteBuffer &&buffer)
ByteBuffer::ByteBuffer(ByteBuffer &&buffer) :
flagCircular {}, flagExpandable {}, flagReadError {}, flagWriteError {},
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {}
{
this->base = buffer.base;
this->length = buffer.length;
@ -220,7 +222,9 @@ namespace Aurora::Memory
buffer.alignment = {};
}
ByteBuffer::ByteBuffer(const ByteBuffer &buffer, bool preservePointers)
ByteBuffer::ByteBuffer(const ByteBuffer &buffer, bool preservePointers) :
flagCircular {}, flagExpandable { }, flagReadError {}, flagWriteError {},
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {}
{
if (buffer.length)
{
@ -258,6 +262,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(const void *in, AuUInt length, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
this->scaleSize = kBufferInitialPower;
@ -280,6 +285,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(const MemoryViewRead &readView, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
this->scaleSize = kBufferInitialPower;
@ -297,6 +303,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(const AuList<AuUInt8> &vector, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
this->scaleSize = kBufferInitialPower;
@ -314,6 +321,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(const MemoryViewRead &readView, AuUInt uAlignment, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
this->scaleSize = kBufferInitialPower;
@ -336,6 +344,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(AuUInt length, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
this->scaleSize = kBufferInitialPower;
@ -357,6 +366,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer(AuUInt length, AuUInt alignment, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
if (!length)
@ -380,6 +390,7 @@ namespace Aurora::Memory
template<typename T>
ByteBuffer::ByteBuffer(T *base, T *end, bool circular, bool expandable) :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(circular), flagExpandable(expandable), flagReadError(0), flagWriteError(0)
{
if (!base)
@ -408,6 +419,7 @@ namespace Aurora::Memory
}
ByteBuffer::ByteBuffer() :
flagNoFree {}, flagNoRealloc {}, flagAlwaysExpandable {}, flagReserveA {},
flagCircular(0), flagExpandable(true), flagReadError(0), flagWriteError(0)
{
this->base = {};

View File

@ -192,50 +192,66 @@ namespace Aurora
struct ThreadingConfig
{
// WARN: these values are not final
bool bNoThreadNames { false };
bool bPlatformIsSMPProcessorOptimized { true }; // Whether to attempt to use mm_pause or similar instruction before yielding into the kernel
AuUInt16 uSpinLoopPowerA { 128 }; // Nudgable spinloop power. This is our local userland niceness factor
// This is comparable to Win32's SetCriticalSectionSpinCount applied across every single AuThreadPrimitives try-lock and lock.
// Adjust this value to compensate for longer critical sections when context switching isn't preferable.
AuUInt64 bEnableAggressiveScheduling : 1 { false };
AuUInt64 bEnableAgrSchedulingRatelimit : 1 { true };
AuUInt64 bPreferNt51XpMutexesOver8 : 1 { false };
AuUInt64 bPreferNt51XpCondvarsOver8 : 1 { false };
AuUInt64 bPreferNtCondvarModernWinSpin : 1 { false };
AuUInt64 bPreferNtCondvarOlderWinSpin : 1 { true };
AuUInt64 bPreferNtSemaphoreSpinTryLock : 1 { true };
AuUInt64 bPreferNtMutexSpinTryLock : 1 { true };
AuUInt64 bPreferNtCondMutexSpinTryLock : 1 { false };
AuUInt64 bPreferLinuxSemaphoreSpinTryLock : 1 { true };
AuUInt64 bPreferLinuxMutexSpinTryLock : 1 { true };
AuUInt64 bPreferLinuxCondMutexSpinTryLock : 1 { true };
#if 0
AuUInt64 bPreferEmulatedWakeOnAddress : 1 { false };
// Resets everything assuming we dont have default initialization (c++14) or we cannot bit default initialize (c++20).
// This is a struct local clear bit for on init.
#if defined(AU_LANG_CPP_20_)
bool bResetToRuntimeDefaults { false };
#else
AuUInt64 bPreferEmulatedWakeOnAddress : 1 { !AuBuild::kIsNtDerived /*everybody else requires us to hit the kernel. */ };
bool bResetToRuntimeDefaults { true };
#endif
AuUInt64 bPreferWaitOnAddressAlwaysSpin : 1 { false }; // ..., if emulated! if double-spinning under higher level locks, disable me.
AuUInt64 bPreferWaitOnAddressAlwaysSpinNative : 1 { !AuBuild::kIsNtDerived }; // ..., if not emulated! noting that most kernels and user-schedulers will spin for you
AuUInt64 bPreferRWLockReadLockSpin : 1 { true };
AuUInt64 bUWPNanosecondEmulationCheckFirst : 1 { false };
AuUInt64 uUWPNanosecondEmulationMaxYields : 7 { 12 };
AuUInt64 bForceEnableAdaptiveSpin : 1 { false }; // ||
AuUInt64 bPreferEnableAdaptiveSpin : 1 { true }; // .
AuUInt64 bPreferLinuxAdaptiveSpin : 1 { true }; // (&&)
AuUInt64 bPreferOldWin32AdaptiveSpin : 1 { false }; // (&&)
AuUInt64 bPreferNewWin32AdaptiveSpin : 1 { true }; // (&&)
AuUInt64 uAdaptiveSpinCUCnt0 : 4 { 0 };
AuUInt64 uAdaptiveSpinCUCnt4 : 4 { 2 };
AuUInt64 uAdaptiveSpinCUCnt8 : 4 { 2 };
AuUInt64 uAdaptiveSpinCUCnt16 : 4 { 4 };
AuUInt64 bPreferFutexRWLock : 1 { true };
AuUInt64 bWinXpThrough7BlazeOptimizerPower : 12 { 300 }; // dont worry about it. we dont care about old portables. lets try to make older win32 targets tweak the scheduling in our favor a bit.
AuUInt64 bPreferLinuxPrimitivesFutexNoSpin : 1 { false };
AuUInt64 bPreferUnixPrimitivesNoSpin : 1 { false };
AuUInt64 bAlwaysRWLockWriteBiasOnReadLock : 1 { false };
AuUInt64 bEnableRWLockWriteBiasOnReadLock : 1 { true };
AuUInt64 bPreferFutexEvent : 1 { true };
bool bNoThreadNames { false };
bool bPlatformIsSMPProcessorOptimized { true }; // Whether to attempt to use mm_pause or similar instruction before yielding into the kernel
AuUInt16 uSpinLoopPowerA { 128 }; // Nudgable spinloop power. This is our local userland niceness factor
// This is comparable to Win32's SetCriticalSectionSpinCount applied across every single AuThreadPrimitives try-lock and lock.
// Adjust this value to compensate for longer critical sections when context switching isn't preferable.
// Using 128 as a default (bouncing around 64 and 512)
// Facebook says half this (cant find src), I used to say about 82 to 512, Windows 7s implementation of CRITICAL_SECTION and SRWLOCK says double that (256), for aggressive (and now incorrect) spin mutex examples ive seen around 2k or less, for not so aggressive pause loops ive seen people use 32-128-ish pauses (also incorrect), dumb shits parroting Win9x documentation and SetCriticalSectionSpinCount example value think you need above >= 4k (stackexchange man strike again).
// Personally, I've seen this tested on 5-12th gen intel, Windows 7 through 11, Linux, and various other configurations.
// Personally, I've seen this run Qt with less CPU resources than every other Qt process on Win7. I've seen this run JavaScript programs dead last on the taskmanagers detail panel, on both 10 and 7.
// 128 to 512 is fine, unless you need to start asserting you are a real time application aware of your hardware requirements / have properly matched task affinity / etc, and don't mind shredding old processor power efficiency while chewing thru nop cycles
// <<<<<<<<<<<<<<< (QA:) Each application will probably need its own nudge value
AuUInt64 bEnableAggressiveScheduling : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // <<<<<<<<<<<<<<< (SHIP:) ENABLE ME FOR AROUND 1MS OR LESS SCHED RESOLUTION
AuUInt64 bEnableAgrSchedulingRatelimit : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferNt51XpMutexesOver8 : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // under modern versions of windows, do not use keyedevents. use the native waitonaddress internals, then waitonaddress proper; and dont touch keyedevents paths.
AuUInt64 bPreferNt51XpCondvarsOver8 : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // under modern versions of windows, do not use keyedevents. use the native waitonaddress internals, then waitonaddress proper; and dont touch keyedevents paths.
AuUInt64 bPreferNtCondvarModernWinSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // very modern cpus have monitor / tpause / etc intrins. sometimes like us, microsoft will use them in userspace under waitonaddress of very modern windows builds. i wouldn't rely on that. we implement spinning ourselves for linux + old win32 for 2 decades worth of processors.
AuUInt64 bPreferNtCondvarOlderWinSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // windows 7 and lower sees better CPU + power draw when we implement spinning ourselves on top of the dreaded bidirectionally blocking keyedevents. besides, msft refused to backport userland monitor (very modern chipsets) to old versions of 10 and 7.
AuUInt64 bPreferNtSemaphoreSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferNtMutexSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferNtCondMutexSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
AuUInt64 bPreferLinuxSemaphoreSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferLinuxMutexSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferLinuxCondMutexSpinTryLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
#if 0
AuUInt64 bPreferEmulatedWakeOnAddress : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
#else
AuUInt64 bPreferEmulatedWakeOnAddress : 1 AU_BIT_FIELD_INIT_AFTER_20( !AuBuild::kIsNtDerived ); // ...,everybody else requires us to hit the kernel
#endif
AuUInt64 bPreferWaitOnAddressAlwaysSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // ..., if emulated! if double-spinning under higher level locks, disable me.
AuUInt64 bPreferWaitOnAddressAlwaysSpinNative : 1 AU_BIT_FIELD_INIT_AFTER_20( !AuBuild::kIsNtDerived ); // ..., if not emulated! noting that most kernels and user-schedulers will spin for you. nt users can expect ntdll to spin / pause / monitor / etc, under * modern * win32 versions.
AuUInt64 bPreferRWLockReadLockSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bUWPNanosecondEmulationCheckFirst : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
AuUInt64 uUWPNanosecondEmulationMaxYields : 7 AU_BIT_FIELD_INIT_AFTER_20( 12 );
AuUInt64 bForceEnableAdaptiveSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // ||
AuUInt64 bPreferEnableAdaptiveSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // .
AuUInt64 bPreferLinuxAdaptiveSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // (&&)
AuUInt64 bPreferOldWin32AdaptiveSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false ); // (&&)
AuUInt64 bPreferNewWin32AdaptiveSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // (&&)
AuUInt64 uAdaptiveSpinCUCnt0 : 4 AU_BIT_FIELD_INIT_AFTER_20( 0 ); // boring thread topology assumptions
AuUInt64 uAdaptiveSpinCUCnt4 : 4 AU_BIT_FIELD_INIT_AFTER_20( 2 ); // boring thread topology assumptions
AuUInt64 uAdaptiveSpinCUCnt8 : 4 AU_BIT_FIELD_INIT_AFTER_20( 2 ); // boring thread topology assumptions
AuUInt64 uAdaptiveSpinCUCnt16 : 4 AU_BIT_FIELD_INIT_AFTER_20( 4 );
AuUInt64 bPreferFutexRWLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // Win10+ and Linux should use futexes inside the AuRWLock primitive, vs other dumber primitives built on similar futex abstraction, both that'll perform about the same regardless.
// Once taking to account other platform specific member overhead, making this compile time isnt worth it in memory and in the CPU-overhead. Enjoy the extra compat (incl WinXP, for almost free).
// Considering we beat pthreads, 3 STLs, Win32 primitives in API functionality and in legacy XP compat, we're * hundreds * of bytes less than a bad STL (incl llvm and msvc), I think our RWLock is fine.
// Making it any smaller would require a different API, different tooling assumptions, and different CPU branching overhead assumptions.
// Even the CPU branching implications of a *portable, potentially-relinkable, potentially-asm* thread id check destroys the excuse for a smaller Aurora::Threading::Waitables futex reimplementation.
AuUInt64 bWinXpThrough7BlazeOptimizerPower : 12 AU_BIT_FIELD_INIT_AFTER_20( 300 ); // dont worry about it. we dont care about old portables. lets try to make older win32 targets tweak the scheduling in our favor a bit.
AuUInt64 bPreferLinuxPrimitivesFutexNoSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
AuUInt64 bPreferUnixPrimitivesNoSpin : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
AuUInt64 bAlwaysRWLockWriteBiasOnReadLock : 1 AU_BIT_FIELD_INIT_AFTER_20( false );
AuUInt64 bEnableRWLockWriteBiasOnReadLock : 1 AU_BIT_FIELD_INIT_AFTER_20( true );
AuUInt64 bPreferFutexEvent : 1 AU_BIT_FIELD_INIT_AFTER_20( true ); // Win10+ and Linux should use a futex inside the AuEvent / AuThreadPrimitive event as the hybrid binary-semaphore/cross/event's signal flag.
};
struct DummyConfig
@ -268,7 +284,7 @@ namespace Aurora
// On other POSIX platforms, its best to keep timers emulated in process.
// By default, you have to opt into a higher res in-process, if required by the platform.
// This does not bypass IO yielding or timeouts; we simply use our own semaphore and scheduler instead of the kernels.
// It only takes 8k ns to 60k ns depending on the platform to wake a thread, and we can hit AuAsync scheduler without too much error, we can just do this instead of relying on historically shitty IO primitives.
// It only takes 8k ns to 60k ns depending on the platform to wake a thread, and we can hit AuAsync scheduler without too much error; we can just do this instead of relying on historically shitty IO primitives.
// Assigning this to true, on any platform, will bypass the kernel's timer ticker.
bool bForceAltOSTimerPrimitives { false };

View File

@ -207,6 +207,12 @@ namespace Aurora
{
gRuntimeConfig = info;
if (gRuntimeConfig.threadingConfig.bResetToRuntimeDefaults)
{
// <= C++17 users will struggle to fill this bit field and I dont want a massive properties object library yet just to handle runtime init.
AuResetMember(gRuntimeConfig.threadingConfig);
}
if (gRuntimeHasStarted)
{
SysPanic("Do not nest RuntimeStart/RuntimeShutdowns. Modules should respect RuntimeHasStarted()");

View File

@ -98,10 +98,17 @@ namespace Aurora::IO
mutable AuOptional<bool> optIsFile;
mutable AuOptional<bool> optIsPipe;
mutable AuOptional<bool> optIsTTY;
AuUInt8 bShouldWriteEoS : 1 { 0 };
AuUInt8 bFlushOnClose : 1 { 0 };
AuUInt8 bDirectIO : 1 { 0 };
AuUInt8 bIsAsync : 1 { 0 };
union
{
struct
{
AuUInt8 bShouldWriteEoS : 1;
AuUInt8 bFlushOnClose : 1;
AuUInt8 bDirectIO : 1;
AuUInt8 bIsAsync : 1;
};
AuUInt8 uFlags {};
};
AuAUInt32 uLock { 0 };
inline void Lock()

View File

@ -83,7 +83,7 @@ namespace Aurora::IO::FS
auto utf8Root = fmt::format("\\\\.\\{}", root);
auto utf8Root2 = utf8Root;
if (utf8Root.ends_with("\\"))
if (AuEndsWith(utf8Root, "\\"))
{
utf8Root.pop_back();
}
@ -98,7 +98,7 @@ namespace Aurora::IO::FS
auto utf8Root = fmt::format("\\\\.\\{}", root);
auto utf8Root2 = utf8Root;
if (utf8Root.ends_with("\\"))
if (AuEndsWith(utf8Root, "\\"))
{
utf8Root.pop_back();
}

View File

@ -533,12 +533,12 @@ namespace Aurora::IO::FS
return;
}
if (str.ends_with('\\'))
if (AuEndsWith(str, '\\'))
{
return;
}
if (str.ends_with('/'))
if (AuEndsWith(str, '/'))
{
return;
}

View File

@ -62,7 +62,7 @@ namespace Aurora::Process
try
{
if (!path.ends_with(AuFS::kPathSplitter))
if (!AuEndsWith(path, AuFS::kPathSplitter))
{
path += AuFS::kPathSplitter;
}
@ -224,7 +224,7 @@ namespace Aurora::Process
gCachedModule = gCachedFullPath.substr(indexA + 1);
gCachedPartialPath = gCachedFullPath.substr(0, indexA);
if (!gCachedPartialPath.ends_with(AuFS::kPathSplitter))
if (!AuEndsWith(gCachedPartialPath, AuFS::kPathSplitter))
{
gCachedPartialPath += AuFS::kPathSplitter;
}

View File

@ -32,17 +32,17 @@ namespace Aurora::Process
struct ModuleBasePairEq
{
constexpr bool operator()(const ModuleBasePair &lhs, const AuString &rhs) const
AU_CONSTEXPR_20 bool operator()(const ModuleBasePair &lhs, const AuString &rhs) const
{
return lhs.module == rhs;
}
constexpr bool operator()(const ModuleBasePair &lhs, const AuUInt &rhs) const
AU_CONSTEXPR_20 bool operator()(const ModuleBasePair &lhs, const AuUInt &rhs) const
{
return lhs.modBase == rhs;
}
constexpr bool operator()(const ModuleBasePair &lhs, const ModuleBasePair &rhs) const
AU_CONSTEXPR_20 bool operator()(const ModuleBasePair &lhs, const ModuleBasePair &rhs) const
{
return rhs.modBase ? lhs.modBase == rhs.modBase : (rhs.module.size() ? rhs.module == rhs.module : false);
}