/***
    Copyright (C) 2021 J Reece Wilson (a/k/a "Reece"). All rights reserved.

    File: Buffered.hpp
    Date: 2021-6-11
    Author: Reece
***/
#pragma once

namespace __audetail
{
    struct BufferLock
    {
        Aurora::Memory::ByteBuffer *pThat;
        AuUInt8 *pIDC {};

        inline BufferLock(Aurora::Memory::ByteBuffer *pThat) :
            pThat(pThat)
        {
            if (pThat)
            {
                AuAtomicAdd(&pThat->uInUseCounter, 1u);
                this->StrongLoad();
                pThat->PrivateUserDataToUtilityMutex()->Lock();
            }
        }

        inline ~BufferLock()
        {
            if (this->pThat)
            {
                AuAtomicSub(&pThat->uInUseCounter, 1u);
                pThat->PrivateUserDataToUtilityMutex()->Unlock();
            }
        }

    private:
    #pragma optimize("", off)
        void StrongLoad()
        {
            // Strict load now. After this point, the compiler may lazily reuse these loads in its
            // emitted code, or the values may be served from the CPU cache.
            // It would be nice to make these properly volatile without impacting our other
            // high-perf paths, somehow.
            pIDC = AuAtomicLoad(&pThat->base);
            pIDC = AuAtomicLoad(&pThat->readPtr);
            pIDC = AuAtomicLoad(&pThat->writePtr);
        }
    #pragma optimize("", on)
    };

    struct BufferAllocLock
    {
        Aurora::Memory::ByteBuffer *pThat;
        AuUInt8 *pIDC {};

        inline BufferAllocLock(Aurora::Memory::ByteBuffer *pThat) :
            pThat(pThat)
        {
            if (pThat)
            {
                this->StrongLoad();
                pThat->PrivateUserDataToUtilityMutex()->Lock();
            }
        }

        inline ~BufferAllocLock()
        {
            if (this->pThat)
            {
                pThat->PrivateUserDataToUtilityMutex()->Unlock();
            }
        }

    private:
    #pragma optimize("", off)
        void StrongLoad()
        {
            // Strict load now. After this point, the compiler may lazily reuse these loads in its
            // emitted code, or the values may be served from the CPU cache.
            // It would be nice to make these properly volatile without impacting our other
            // high-perf paths, somehow.
            pIDC = AuAtomicLoad(&pThat->base);
            pIDC = AuAtomicLoad(&pThat->readPtr);
            pIDC = AuAtomicLoad(&pThat->writePtr);
        }
    #pragma optimize("", on)
    };
}

#include "BlobReader.hpp"
#include "BlobSeekableReader.hpp"
#include "BlobWriter.hpp"
#include "BlobSeekableWriter.hpp"

#include "ViewReader.hpp"
#include "ViewSeekableReader.hpp"
#include "ViewWriter.hpp"
#include "ViewSeekableWriter.hpp"
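
// ---------------------------------------------------------------------------
// Illustrative only: a minimal sketch of how the RAII helpers above are meant
// to be used. The free function below is hypothetical and is not part of this
// header; it assumes nothing about ByteBuffer beyond the members already
// referenced above (uInUseCounter, base/readPtr/writePtr,
// PrivateUserDataToUtilityMutex). Excluded from the build.
// ---------------------------------------------------------------------------
#if 0
static void ExamplePinBuffer(Aurora::Memory::ByteBuffer *pBuffer)
{
    // Pins the buffer for the duration of this scope: bumps uInUseCounter,
    // forces fresh loads of base/readPtr/writePtr, and holds the buffer's
    // utility mutex.
    __audetail::BufferLock lock(pBuffer);

    // ... operate on pBuffer while other users are excluded ...

}   // ~BufferLock: decrements uInUseCounter and releases the utility mutex
#endif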